Mirror of https://github.com/philomena-dev/philomena.git, synced 2025-02-17 11:04:22 +01:00
fix vote wipe mechanism
commit e6f1885476
parent 0f4a231357
1 changed file with 19 additions and 40 deletions
@@ -1,4 +1,6 @@
 defmodule Philomena.UserDownvoteWipe do
+  alias Philomena.Batch
+  alias Philomena.Elasticsearch
   alias Philomena.Images.Image
   alias Philomena.Images
   alias Philomena.ImageVotes.ImageVote
@@ -9,64 +11,41 @@ defmodule Philomena.UserDownvoteWipe do
   def perform(user, upvotes_and_faves_too \\ false) do
     ImageVote
     |> where(user_id: ^user.id, up: false)
-    |> in_batches(fn queryable, image_ids ->
-      Repo.delete_all(where(queryable, [iv], iv.image_id in ^image_ids))
+    |> Batch.query_batches([id_field: :image_id], fn queryable ->
+      {_, image_ids} = Repo.delete_all(select(queryable, [i_v], i_v.image_id))
       Repo.update_all(where(Image, [i], i.id in ^image_ids), inc: [downvotes_count: -1, score: 1])
-      Images.reindex_images(image_ids)

       # Allow time for indexing to catch up
       :timer.sleep(:timer.seconds(10))
+      reindex(image_ids)
     end)

     if upvotes_and_faves_too do
       ImageVote
       |> where(user_id: ^user.id, up: true)
-      |> in_batches(fn queryable, image_ids ->
-        Repo.delete_all(where(queryable, [iv], iv.image_id in ^image_ids))
+      |> Batch.query_batches([id_field: :image_id], fn queryable ->
+        {_, image_ids} = Repo.delete_all(select(queryable, [i_v], i_v.image_id))
         Repo.update_all(where(Image, [i], i.id in ^image_ids), inc: [upvotes_count: -1, score: -1])
-        Images.reindex_images(image_ids)
-
-        :timer.sleep(:timer.seconds(10))
+        reindex(image_ids)
       end)

       ImageFave
       |> where(user_id: ^user.id)
-      |> in_batches(fn queryable, image_ids ->
-        Repo.delete_all(where(queryable, [iv], iv.image_id in ^image_ids))
+      |> Batch.query_batches([id_field: :image_id], fn queryable ->
+        {_, image_ids} = Repo.delete_all(select(queryable, [i_f], i_f.image_id))
         Repo.update_all(where(Image, [i], i.id in ^image_ids), inc: [faves_count: -1])
-        Images.reindex_images(image_ids)
-
-        :timer.sleep(:timer.seconds(10))
+        reindex(image_ids)
       end)
     end
   end

-  # todo: extract this to its own module somehow
-  defp in_batches(queryable, mapper) do
-    queryable = order_by(queryable, asc: :image_id)
+  defp reindex(image_ids) do
+    Image
+    |> where([i], i.id in ^image_ids)
+    |> preload(^Images.indexing_preloads())
+    |> Elasticsearch.reindex(Image)

-    ids =
-      queryable
-      |> select([q], q.image_id)
-      |> limit(1000)
-      |> Repo.all()
-
-    queryable
-    |> in_batches(mapper, 1000, ids)
-  end
-
-  defp in_batches(_queryable, _mapper, _batch_size, []), do: nil
-
-  defp in_batches(queryable, mapper, batch_size, ids) do
-    mapper.(exclude(queryable, :order_by), ids)
-
-    ids =
-      queryable
-      |> where([q], q.image_id > ^Enum.max(ids))
-      |> select([q], q.image_id)
-      |> limit(^batch_size)
-      |> Repo.all()
-
-    in_batches(queryable, mapper, batch_size, ids)
+    # allow time for indexing to catch up
+    :timer.sleep(:timer.seconds(10))
   end
 end
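Note on the batching used above: the deleted in_batches/2 and in_batches/4 helpers show the keyset-batching pattern that the new Batch.query_batches([id_field: :image_id], fun) calls depend on, that is, fetch up to a fixed number of image ids in ascending order, hand the callback a queryable restricted to that batch, then continue from the largest id seen. The sketch below restates that pattern as a standalone module for reference only. It is modeled on the deleted helper, not on Philomena's actual Batch module, whose implementation lives elsewhere in the repo and may differ; the module name BatchingSketch, the repo argument, and the hard-coded :image_id column (configured through the id_field option in the real calls) are illustrative assumptions.

    # A sketch only, shaped after the removed in_batches/2 helper; the real
    # Philomena.Batch.query_batches/3 is assumed to behave roughly like this.
    defmodule BatchingSketch do
      import Ecto.Query

      # `repo` is any Ecto.Repo module (for example Philomena.Repo).
      # `callback` receives a queryable restricted to the current batch of ids.
      # Assumes positive integer ids in the :image_id column.
      def query_batches(queryable, repo, batch_size, callback) do
        step(queryable, repo, batch_size, callback, fetch_ids(queryable, repo, batch_size, 0))
      end

      # No ids left: every batch has been processed.
      defp step(_queryable, _repo, _batch_size, _callback, []), do: :ok

      defp step(queryable, repo, batch_size, callback, ids) do
        # Hand the caller only the rows belonging to this batch.
        callback.(where(queryable, [q], q.image_id in ^ids))

        # Keyset pagination: continue strictly after the largest id seen so far.
        next = fetch_ids(queryable, repo, batch_size, Enum.max(ids))
        step(queryable, repo, batch_size, callback, next)
      end

      defp fetch_ids(queryable, repo, batch_size, min_id) do
        queryable
        |> where([q], q.image_id > ^min_id)
        |> order_by(asc: :image_id)
        |> select([q], q.image_id)
        |> limit(^batch_size)
        |> repo.all()
      end
    end

With a helper of this shape, each callback in the new perform/2 deletes the batch's ImageVote or ImageFave rows, recovers the affected image ids from the {count, ids} tuple returned by Repo.delete_all with a select, adjusts the per-image counter caches with Repo.update_all, and reindexes those images before the next batch runs. As the upvotes_and_faves_too flag in the diff shows, perform(user) wipes only downvotes, while perform(user, true) also wipes upvotes and faves.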