fix vote wipe mechanism

byte[] 2020-01-01 18:29:49 -05:00
parent 0f4a231357
commit e6f1885476


@@ -1,4 +1,6 @@
 defmodule Philomena.UserDownvoteWipe do
+  alias Philomena.Batch
+  alias Philomena.Elasticsearch
   alias Philomena.Images.Image
   alias Philomena.Images
   alias Philomena.ImageVotes.ImageVote
@@ -9,64 +11,41 @@ defmodule Philomena.UserDownvoteWipe do
   def perform(user, upvotes_and_faves_too \\ false) do
     ImageVote
     |> where(user_id: ^user.id, up: false)
-    |> in_batches(fn queryable, image_ids ->
-      Repo.delete_all(where(queryable, [iv], iv.image_id in ^image_ids))
+    |> Batch.query_batches([id_field: :image_id], fn queryable ->
+      {_, image_ids} = Repo.delete_all(select(queryable, [i_v], i_v.image_id))
       Repo.update_all(where(Image, [i], i.id in ^image_ids), inc: [downvotes_count: -1, score: 1])
-      Images.reindex_images(image_ids)
 
-      # Allow time for indexing to catch up
-      :timer.sleep(:timer.seconds(10))
+      reindex(image_ids)
     end)
 
     if upvotes_and_faves_too do
       ImageVote
       |> where(user_id: ^user.id, up: true)
-      |> in_batches(fn queryable, image_ids ->
-        Repo.delete_all(where(queryable, [iv], iv.image_id in ^image_ids))
+      |> Batch.query_batches([id_field: :image_id], fn queryable ->
+        {_, image_ids} = Repo.delete_all(select(queryable, [i_v], i_v.image_id))
         Repo.update_all(where(Image, [i], i.id in ^image_ids), inc: [upvotes_count: -1, score: -1])
-        Images.reindex_images(image_ids)
 
-        :timer.sleep(:timer.seconds(10))
+        reindex(image_ids)
      end)

      ImageFave
      |> where(user_id: ^user.id)
-      |> in_batches(fn queryable, image_ids ->
-        Repo.delete_all(where(queryable, [iv], iv.image_id in ^image_ids))
+      |> Batch.query_batches([id_field: :image_id], fn queryable ->
+        {_, image_ids} = Repo.delete_all(select(queryable, [i_f], i_f.image_id))
        Repo.update_all(where(Image, [i], i.id in ^image_ids), inc: [faves_count: -1])
-        Images.reindex_images(image_ids)
 
-        :timer.sleep(:timer.seconds(10))
+        reindex(image_ids)
      end)
    end
  end
 
-  # todo: extract this to its own module somehow
-  defp in_batches(queryable, mapper) do
-    queryable = order_by(queryable, asc: :image_id)
-
-    ids =
-      queryable
-      |> select([q], q.image_id)
-      |> limit(1000)
-      |> Repo.all()
-
-    queryable
-    |> in_batches(mapper, 1000, ids)
-  end
-
-  defp in_batches(_queryable, _mapper, _batch_size, []), do: nil
-
-  defp in_batches(queryable, mapper, batch_size, ids) do
-    mapper.(exclude(queryable, :order_by), ids)
-
-    ids =
-      queryable
-      |> where([q], q.image_id > ^Enum.max(ids))
-      |> select([q], q.image_id)
-      |> limit(^batch_size)
-      |> Repo.all()
-
-    in_batches(queryable, mapper, batch_size, ids)
-  end
+  defp reindex(image_ids) do
+    Image
+    |> where([i], i.id in ^image_ids)
+    |> preload(^Images.indexing_preloads())
+    |> Elasticsearch.reindex(Image)
+
+    # allow time for indexing to catch up
+    :timer.sleep(:timer.seconds(10))
+  end
 end
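
The refactor extracts the module-local in_batches/2 helper (which the old code already flagged "todo: extract this to its own module somehow") into a shared Batch.query_batches/3, and collapses the per-batch "reindex, then sleep" step into a single reindex/1 helper that pushes just the touched images through Elasticsearch.reindex/2. The callbacks now learn which rows they deleted from the delete itself: on PostgreSQL, Repo.delete_all/2 on a query carrying a select returns {count, selected_values}, so no separate id lookup is needed. Below is a minimal sketch of what a keyset-paging query_batches/3 helper of this shape could look like, modeled on the removed in_batches code; it is an illustration, not the actual Philomena.Batch source, and the BatchSketch module name, the :batch_size default, and the Philomena.Repo alias are assumptions.

defmodule BatchSketch do
  # Illustrative keyset-paging batch helper, modeled on the removed
  # in_batches/2 code; not the actual Philomena.Batch source.
  import Ecto.Query

  alias Philomena.Repo

  # Pages `queryable` by `opts[:id_field]` and calls `callback` once per
  # batch with the queryable narrowed to that batch's ids.
  def query_batches(queryable, opts, callback) do
    id_field = Keyword.fetch!(opts, :id_field)
    batch_size = Keyword.get(opts, :batch_size, 1000)

    queryable
    |> fetch_ids(id_field, batch_size)
    |> run_batches(queryable, id_field, batch_size, callback)
  end

  defp run_batches([], _queryable, _id_field, _batch_size, _callback), do: :ok

  defp run_batches(ids, queryable, id_field, batch_size, callback) do
    # Hand the callback a query limited to this batch; the callback may
    # delete the rows, which is why paging advances by id rather than offset.
    batch_query = where(queryable, [q], field(q, ^id_field) in ^ids)
    callback.(batch_query)

    queryable
    |> where([q], field(q, ^id_field) > ^Enum.max(ids))
    |> fetch_ids(id_field, batch_size)
    |> run_batches(queryable, id_field, batch_size, callback)
  end

  defp fetch_ids(queryable, id_field, batch_size) do
    queryable
    |> order_by([q], asc: field(q, ^id_field))
    |> select([q], field(q, ^id_field))
    |> limit(^batch_size)
    |> Repo.all()
  end
end

With a helper like this, each call site in the diff hands query_batches a scoped ImageVote or ImageFave query, deletes the current batch while selecting :image_id, decrements the cached counters on those images, and reindexes only the affected rows.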