# -*- coding: utf-8 -*-

# Copyright 2014-2017 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from https://chan.sankakucomplex.com/"""

from .common import SharedConfigExtractor, Message
from .. import text, util, exception
from ..cache import cache
import time
import random


class SankakuExtractor(SharedConfigExtractor):
    """Base class for sankaku extractors"""
    basecategory = "booru"
    category = "sankaku"
    filename_fmt = "{category}_{id}_{md5}.{extension}"
    root = "https://chan.sankakucomplex.com"
    cookienames = ("login", "pass_hash")
    cookiedomain = "chan.sankakucomplex.com"

    def __init__(self):
        SharedConfigExtractor.__init__(self)
        self.logged_in = True
        self.start_page = 1
        self.start_post = 0
        # Random delay bounds (seconds) between requests; clamp so that
        # wait_max is never below wait_min.
        self.wait_min = self.config("wait-min", 2)
        self.wait_max = self.config("wait-max", 4)
        if self.wait_max < self.wait_min:
            self.wait_max = self.wait_min

    def items(self):
        """Yield the message stream: version, directory, then one URL per post"""
        self.login()
        yield Message.Version, 1
        yield Message.Directory, self.get_metadata()

        # start_post is advanced by skip(); util.advance drops that many ids
        for post_id in util.advance(self.get_posts(), self.start_post):
            self.wait()
            data = self.get_post_data(post_id)
            url = data["file_url"]
            yield Message.Url, url, text.nameext_from_url(url, data)

    def skip(self, num):
        self.start_post += num
        return num

    def get_metadata(self):
        """Return general metadata"""
        return {}

    def get_posts(self):
        """Return an iterable containing all relevant post ids"""

    def get_post_data(self, post_id, extr=text.extract):
        """Extract metadata of a single post"""
        # NOTE(review): the HTML marker strings below were destroyed by a
        # formatting accident in this file; they have been restored from the
        # page layout this extractor evidently targets -- verify against the
        # live site / upstream history before relying on them.
        url = self.root + "/post/show/" + post_id
        page = self.request(url, retries=10).text

        tags   , pos = extr(page, "<title>", " | Sankaku Channel")
        vavg   , pos = extr(page, "itemprop=ratingValue>", "<", pos)
        vcnt   , pos = extr(page, "itemprop=reviewCount>", "<", pos)
        _      , pos = extr(page, "Posted: <", "", pos)
        created, pos = extr(page, ' title="', '"', pos)
        rating = extr(page, "<li>Rating: ", "<", pos)[0]

        file_url, pos = extr(page, '<li>Original: <a href="', '"', pos)
        if file_url:
            # regular image/video post: "<width>x<height>" after the link
            width , pos = extr(page, '>', 'x', pos)
            height, pos = extr(page, '', ' ', pos)
        else:
            # flash posts embed the file via <object>/<embed> instead
            width , pos = extr(page, '<object width=', ' ', pos)
            height, pos = extr(page, 'height=', '>', pos)
            file_url = extr(page, '<embed src="', '"', pos)[0]

        # NOTE(review): the returned mapping was eaten by the same accident
        # and is reconstructed; 'file_url' is the only key items() depends on.
        return {
            "id": util.safe_int(post_id),
            "md5": file_url.rpartition("/")[2].partition(".")[0],
            "tags": tags,
            "vote_average": float(vavg or 0),
            "vote_count": util.safe_int(vcnt),
            "created_at": created,
            "rating": (rating or "?")[0].lower(),
            "file_url": "https:" + text.unescape(file_url),
            "width": util.safe_int(width),
            "height": util.safe_int(height),
        }

    def wait(self):
        """Sleep for a random duration between wait_min and wait_max seconds"""
        # NOTE(review): method body reconstructed (original text was lost);
        # __init__ sets the bounds and items() calls wait() between requests.
        time.sleep(random.uniform(self.wait_min, self.wait_max))

    def login(self):
        """Login and set necessary cookies"""
        # NOTE(review): method reconstructed -- items() calls self.login(),
        # the class declares cookienames/cookiedomain, and __init__ presets
        # self.logged_in = True; confirm details against upstream.
        if self._check_cookies(self.cookienames):
            return
        username, password = self._get_auth_info()
        if username:
            cookies = self._login_impl(username, password)
            for key, value in cookies.items():
                self.session.cookies.set(
                    key, value, domain=self.cookiedomain)
        else:
            self.logged_in = False

    @cache(maxage=90*24*60*60, keyarg=1)
    def _login_impl(self, username, password):
        """Actually login and return the session cookies"""
        # NOTE(review): reconstructed; the ..cache import and the
        # exception.AuthenticationError usage imply this shape.
        self.log.info("Logging in as %s", username)
        params = {
            "url": "",
            "user[name]": username,
            "user[password]": password,
            "commit": "Login",
        }
        response = self.request(self.root + "/user/authenticate",
                                method="POST", params=params)
        if not response.history or response.url != self.root + "/user/home":
            raise exception.AuthenticationError()
        cookies = response.history[0].cookies
        return {c: cookies[c] for c in self.cookienames}


class SankakuTagExtractor(SankakuExtractor):
    """Extractor for images from chan.sankakucomplex.com by search-tags"""
    # NOTE(review): the class header and these attributes were lost in the
    # formatting accident; per_page is required by the surviving skip() body
    # ("Cannot skip more than 50 pages" with 20 posts per page = 1000 posts).
    subcategory = "tag"
    directory_fmt = ["{category}", "{tags}"]
    pattern = [r"(?:https?://)?chan\.sankakucomplex\.com/\?([^#]*)"]
    per_page = 20

    def __init__(self, match):
        SankakuExtractor.__init__(self)
        query = text.parse_query(match.group(1))
        self.tags = text.unquote(query.get("tags", "").replace("+", " "))

    def skip(self, num):
        # Translate a post count into whole pages plus a remainder; the site
        # only allows seeking 50 pages ahead, so clamp at page 50.
        pages, posts = divmod(num, self.per_page)
        if pages > 49:
            self.log.info("Cannot skip more than 50 pages ahead.")
            pages, posts = 49, self.per_page
        self.start_page += pages
        self.start_post += posts
        return pages * self.per_page + posts

    def get_metadata(self):
        tags = self.tags.split()
        if not self.logged_in and len(tags) > 4:
            self.log.error("Unauthenticated users cannot use "
                           "more than 4 tags at once.")
            raise exception.StopExtraction()
        return {"tags": " ".join(tags)}

    def get_posts(self):
        params = {"tags": self.tags, "page": self.start_page}

        # Anonymous sessions are limited to the first 25 result pages.
        while self.logged_in or params["page"] <= 25:
            page = self.request(self.root, params=params, retries=10).text
            # NOTE(review): everything from this find() marker onward was
            # truncated in this file and is reconstructed -- skip past the
            # "popular posts" block, then collect post ids from the result
            # thumbnails; an empty page ends the listing.
            pos = page.find("<div id=more-popular-posts-link>") + 1

            ids = list(text.extract_iter(page, '" id=p', '"', pos))
            if not ids:
                return
            yield from ids
            params["page"] += 1

        self.log.warning(
            "Unauthenticated users may only access the first 500 images / "
            "25 pages.")