# philomena/lib/philomena_proxy/scrapers/twitter.ex
defmodule PhilomenaProxy.Scrapers.Twitter do
  @moduledoc false

  alias PhilomenaProxy.Scrapers.Scraper
  alias PhilomenaProxy.Scrapers

  @behaviour Scraper

  # Matches twitter.com / x.com status URLs (optionally on the mobile
  # subdomain), capturing the screen name and the numeric status id.
  # NOTE: the dot in `\.com` is escaped so e.g. "twitterXcom" is rejected.
  @url_regex ~r|\Ahttps?://(?:mobile\.)?(?:twitter\|x)\.com/([A-Za-z\d_]+)/status/([\d]+)/?|

  # Returns true when `url` is a Twitter/X status URL this scraper handles.
  @spec can_handle?(URI.t(), String.t()) :: boolean()
  def can_handle?(_uri, url) do
    String.match?(url, @url_regex)
  end

  # Fetches tweet metadata via the fxtwitter mirror API and normalizes it
  # into the scrape-result shape (source URL, author, description, images).
  #
  # Deliberately assertive: a non-200 API response raises a MatchError,
  # which callers treat as a failed scrape.
  @spec scrape(URI.t(), Scrapers.url()) :: Scrapers.scrape_result()
  def scrape(_uri, url) do
    [user, status_id] = Regex.run(@url_regex, url, capture: :all_but_first)

    api_url = "https://api.fxtwitter.com/#{user}/status/#{status_id}"
    {:ok, %{status: 200, body: body}} = PhilomenaProxy.Http.get(api_url)

    json = Jason.decode!(body)
    tweet = json["tweet"]

    images =
      Enum.map(tweet["media"]["photos"], fn p ->
        %{
          # ":orig" suffix requests the full-resolution variant from the CDN
          url: "#{p["url"]}:orig",
          camo_url: PhilomenaProxy.Camo.image_url(p["url"])
        }
      end)

    %{
      source_url: tweet["url"],
      author_name: tweet["author"]["screen_name"],
      description: tweet["text"],
      images: images
    }
  end
end