parent
8c7da93d19
commit
1f4866fcd6
@ -1,62 +1,27 @@
|
|||||||
from .common import AsyncExtractor
|
# -*- coding: utf-8 -*-
|
||||||
from ..util import filename_from_url
|
|
||||||
import xml.etree.ElementTree as ET
|
|
||||||
import json
|
|
||||||
import urllib.parse
|
|
||||||
|
|
||||||
class BooruExtractor(AsyncExtractor):
    """Base class for extractors for *booru-style image boards.

    Derived classes set ``api_url`` and choose a response format
    (JSON or XML) by subclassing the matching sibling class.
    """

    def __init__(self, match, config):
        """Set up search parameters from regex *match* group 1 (the tags).

        Args:
            match:  re.Match whose group(1) is the URL-encoded tag string.
            config: configuration object forwarded to AsyncExtractor.
        """
        AsyncExtractor.__init__(self, config)
        self.tags = urllib.parse.unquote(match.group(1))
        self.category = "booru"
        self.params = {"tags": self.tags}
        # Name of the pagination query parameter; derived classes may
        # override it for sites that use a different scheme.
        self.page = "page"
        # Tags may contain '/', which is not safe in a directory name.
        self.directory = self.tags.replace("/", "_")

    def update_page(self, reset=False):
        """Advance to the next result page, or restart at page 1.

        Override this method in derived classes if necessary; it is
        usually enough to adjust the 'page' attribute.
        """
        # FIX: the original tested identity against the False singleton
        # ("reset is False"); use plain truthiness with the positive
        # branch first for readability.
        if reset:
            self.params[self.page] = 1
        else:
            self.params[self.page] += 1
||||||
class JSONBooruExtractor(BooruExtractor):
    """Booru extractor variant for sites with a JSON API."""

    def images(self):
        """Yield ``(url, filename)`` pairs for every image of the search.

        Pages through the JSON API until an empty result list is
        returned, resolving each ``file_url`` against ``api_url`` so
        relative URLs work as well.
        """
        self.update_page(reset=True)
        while True:
            images = json.loads(
                self.request(self.api_url, verify=True, params=self.params).text
            )
            # FIX: idiomatic emptiness test instead of "len(...) == 0".
            if not images:
                return
            for img in images:
                url = urllib.parse.urljoin(self.api_url, img["file_url"])
                name = "{}_{}".format(self.category, filename_from_url(url))
                yield url, name
            self.update_page()
|
||||||
class XMLBooruExtractor(BooruExtractor):
    """Booru extractor variant for sites with an XML API."""

    def images(self):
        """Yield ``(url, filename)`` pairs for every image of the search.

        Pages through the XML API until the response root element has
        no children left.
        """
        self.update_page(reset=True)
        while True:
            root = ET.fromstring(
                self.request(self.api_url, verify=True, params=self.params).text
            )
            # 'len(root) == 0' is the correct emptiness test for an
            # Element: its truth value is deprecated/ambiguous.
            if len(root) == 0:
                return
            for item in root:
                # FIX: resolve against api_url like the JSON variant does;
                # a no-op for absolute URLs, but handles sites that
                # return relative file_url values.
                url = urllib.parse.urljoin(self.api_url, item.attrib["file_url"])
                name = "{}_{}".format(self.category, filename_from_url(url))
                yield url, name
            self.update_page()
||||||
class Extractor(JSONBooruExtractor):
    """Extract image-URLs from https://danbooru.donmai.us/ searches."""

    def __init__(self, match, config):
        super().__init__(match, config)
        # Must come after the parent __init__, which sets the generic
        # "booru" category.
        self.category = "danbooru"
        self.api_url = "https://danbooru.donmai.us/posts.json"
Loading…
Reference in new issue