# -*- coding: utf-8 -*-

# Copyright 2019 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Generic extractors for *reactor sites"""

from .common import Extractor, Message, SharedConfigMixin
from .. import text
import urllib.parse
import random
import time
import json


BASE_PATTERN = r"(?:https?://)?((?:[^/.]+\.)?reactor\.cc)"


class ReactorExtractor(SharedConfigMixin, Extractor):
    """Base class for *reactor.cc extractors"""
    basecategory = "reactor"
    filename_fmt = "{post_id}_{num:>02}{title[:100]:?_//}.{extension}"
    archive_fmt = "{post_id}_{num}"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.root = "http://" + match.group(1)
        self.session.headers["Referer"] = self.root

        # wait a random amount of time between requests;
        # "wait-max" may not be smaller than "wait-min"
        self.wait_min = self.config("wait-min", 3)
        self.wait_max = self.config("wait-max", 6)
        if self.wait_max < self.wait_min:
            self.wait_max = self.wait_min

        if not self.category:
            # set category based on domain name
            netloc = urllib.parse.urlsplit(self.root).netloc
            self.category = netloc.rpartition(".")[0]

    def items(self):
        data = self.metadata()
        yield Message.Version, 1
        yield Message.Directory, data
        for post in self.posts():
            for image in self._parse_post(post):
                url = image["url"]
                image.update(data)
                yield Message.Url, url, text.nameext_from_url(url, image)

    def metadata(self):
        """Collect metadata for the extractor job"""
        return {}

    def posts(self):
        """Return all relevant post objects"""
        return self._pagination(self.url)

    def _pagination(self, url):
        while True:
            time.sleep(random.uniform(self.wait_min, self.wait_max))
            response = self.request(url)

            if response.history:
                # sometimes there is a redirect from
                # the last page of a listing (.../tag/<tag>/1)
                # to the first page (.../tag/<tag>),
                # which could cause an endless loop;
                # the former URL contains 5 slashes, the latter 4
                cnt_old = response.history[0].url.count("/")
                cnt_new = response.url.count("/")
                if cnt_old == 5 and cnt_new == 4:
                    return
            page = response.text
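
            # every post lies between a user-header and a user-footer
            # block; the two marker strings below were lost to HTML
            # stripping and are assumed from the reactor.cc page markup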
            yield from text.extract_iter(
                page, '<div class="uhead">', '<div class="ufoot">')
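
            # the link to the next page is the first "href" after the
            # "current" page marker inside the pagination block; no
            # "next" button (ValueError) or a failed extract returning
            # None (TypeError) means the listing ends here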
            try:
                pos = page.index("class='next'")
                pos = page.rindex("class='current'", 0, pos)
                url = self.root + text.extract(page, "href='", "'", pos)[0]
            except (ValueError, TypeError):
                return

    def _parse_post(self, post):
        post, _, script = post.partition('<script type="application/ld+json">')
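        # reactor posts embed their metadata as JSON-LD in a trailing
        # <script> block (marker string assumed, as it was lost to HTML
        # stripping); the HTML before it holds the actual image elements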