# Scraper for www.storm.mg article recommendations.
# Author: AWeirdDev — "release scraper code" (revision 7a093ca, verified).
# Fetches listing pages, reads each article's paragraphs, and saves the
# result as a Hugging Face `datasets` Dataset on disk.
import requests
from datasets import Dataset
from selectolax.lexbor import LexborHTMLParser
# How many pages to seek for article recommendations?
# (https://www.storm.mg/articles/{page_id})
N_PAGES_OF_ARTICLES_RECOMMENDATIONS: int = 100
# printf-style template for a listing page; %i is the 1-based page id.
base_url: str = "https://www.storm.mg/articles/%i"
# Browser-like User-Agent header value sent with every request.
user_agent: str = (
# use mine, or put your user agent here
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/121.0.0.0 Safari/537.36 OPR/107.0.0.0"
)
def read_article(link: str, timeout: float = 30.0) -> list:
    """Read an article on www.storm.mg.

    Args:
        link: Absolute URL of the article page.
        timeout: Seconds before the HTTP request is aborted. New,
            backward-compatible parameter — without it a stalled
            connection would hang the scrape forever.

    Returns:
        A list of paragraph texts; the site marks article body
        paragraphs as ``<p aid=...>``.

    Raises:
        requests.HTTPError: If the server responds with an error status.
    """
    r = requests.get(
        link,
        headers={"User-Agent": user_agent},
        timeout=timeout,  # fail fast instead of hanging indefinitely
    )
    r.raise_for_status()
    parser = LexborHTMLParser(r.text)
    # Collect the text of every paragraph carrying an `aid` attribute.
    return [p.text(separator=" ", strip=True) for p in parser.css("p[aid]")]
def generate_dataset(
    n_pages: int = N_PAGES_OF_ARTICLES_RECOMMENDATIONS,
    timeout: float = 30.0,
):
    """Generate the dataset.

    Scans listing pages 1..n_pages, parses each recommendation card,
    fetches the full article text, and yields one record per article.

    Args:
        n_pages: Number of listing pages to scan (defaults to the
            module constant, preserving the original behavior).
        timeout: Seconds before each HTTP request is aborted.

    Yields:
        dict with keys: image, title, content, tag, author,
        timestamp, link. Optional fields are None when the card
        lacks the corresponding element.
    """
    for page_id in range(n_pages):
        # Listing pages are 1-based: /articles/1 .. /articles/n_pages.
        r = requests.get(
            base_url % (page_id + 1),
            headers={"User-Agent": user_agent},
            timeout=timeout,  # avoid hanging forever on a stalled connection
        )
        r.raise_for_status()
        parser = LexborHTMLParser(r.text)
        articles = parser.css(".category_cards_wrapper .category_card.card_thumbs_left")
        for article in articles:
            # css_first returns None when a selector has no match;
            # guard so one malformed card can't crash the whole scrape.
            link_node = article.css_first(".link_title")
            title_node = article.css_first(".card_title")
            if link_node is None or title_node is None:
                continue  # card is unusable without a link and title
            image_node = article.css_first("img")
            tag_node = article.css_first(".tags_wrapper a")
            info = article.css_first("p.card_info.right")
            author_node = info.css_first(".info_author") if info else None
            time_node = info.css_first(".info_time") if info else None
            link = link_node.attributes['href']
            yield {
                "image": image_node.attributes['src'] if image_node else None,
                "title": title_node.text(),
                "content": "\n".join(read_article(link)),
                "tag": tag_node.text() if tag_node else None,
                "author": author_node.text() if author_node else None,
                "timestamp": time_node.text() if time_node else None,
                "link": link,
            }
if __name__ == "__main__":
    # Guarded so importing this module (e.g. to reuse read_article)
    # does not kick off a full scrape as a side effect.
    dataset = Dataset.from_generator(generate_dataset)
    # Directory name reflects the approximate article count
    # (~20 recommendation cards per listing page).
    dataset.save_to_disk(
        f"storm-org-articles-{20 * N_PAGES_OF_ARTICLES_RECOMMENDATIONS}"
    )