[gelbooru] use manual extraction

... to compensate for their disabled API.
(https://gelbooru.com/index.php?page=forum&s=view&id=3875)

This also adds an extractor for image-pools.
pull/54/head
Mike Fährmann 7 years ago
parent 55c64cad4b
commit 214972bc9a
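For context, "manual extraction" here means that instead of querying the (now disabled) XML API, the extractor requests the ordinary HTML post page and cuts the wanted fields out of it with string markers. The sketch below is illustrative only: it uses requests and a simplified extract() stand-in for gallery-dl's own text.extract() helper, and the markers simply mirror the ones used in gelbooru.py further down.

    import requests

    def extract(page, begin, end):
        # simplified stand-in for gallery-dl's text.extract():
        # return the text between 'begin' and 'end', or None if 'begin' is missing
        try:
            start = page.index(begin) + len(begin)
            return page[start:page.index(end, start)]
        except ValueError:
            return None

    def post_metadata(post_id):
        # fetch the regular post page and pull a few fields out of the HTML
        url = "https://gelbooru.com/index.php?page=post&s=view&id=" + post_id
        page = requests.get(url).text
        file_url = "http" + extract(page, '<li><a href="http', '"')
        return {
            "id": post_id,
            "rating": extract(page, '<li>Rating: ', '<'),
            "file_url": file_url,
            "md5": file_url.rpartition("/")[2].partition(".")[0],
        }

    # post id taken from the test case in the diff below
    print(post_metadata("313638"))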

@@ -22,7 +22,7 @@ Fallen Angels Scans https://www.fascans.com/ Chapters, Manga
Fireden https://boards.fireden.net/ Threads
Flickr https://www.flickr.com/ |Images from Use-2| Optional (OAuth)
Futaba Channel https://www.2chan.net/ Threads
Gelbooru https://gelbooru.com/ Posts, Tag-Searches
Gelbooru https://gelbooru.com/ Pools, Posts, Tag-Searches
Gfycat https://gfycat.com/ individual Images
GoManga https://gomanga.co/ Chapters, Manga
HBrowse http://www.hbrowse.com/ Chapters, Manga

@@ -301,7 +301,7 @@ class DeviantartCollectionExtractor(DeviantartExtractor):
test = [(("https://pencilshadings.deviantart.com"
"/favourites/70595441/3D-Favorites"), {
"url": "36ea299132a6b0a0cd319318e9bf18ad32e9b8cc",
"keyword": "6aada4e3159ca09ff8de9bae880952ce3b46d529",
"keyword": "e32b1840f1e09d1671bdc0f8a7a72472a0df57a8",
"options": (("original", False),),
})]

@@ -8,40 +8,122 @@
"""Extract images from https://gelbooru.com/"""
from . import booru
from .common import SharedConfigExtractor, Message
from .. import text, util
class GelbooruExtractor(booru.XMLBooruExtractor):
class GelbooruExtractor(SharedConfigExtractor):
"""Base class for gelbooru extractors"""
basecategory = "booru"
category = "gelbooru"
api_url = "https://gelbooru.com/"
pagestart = 0
pagekey = "pid"
filename_fmt = "{category}_{id}_{md5}.{extension}"
def setup(self):
self.params.update({"page": "dapi", "s": "post", "q": "index"})
def items(self):
yield Message.Version, 1
yield Message.Directory, self.get_metadata()
for post_id in self.posts():
data = self.get_post_data(post_id)
url = data["file_url"]
yield Message.Url, url, text.nameext_from_url(url, data)
class GelbooruTagExtractor(GelbooruExtractor, booru.BooruTagExtractor):
def posts(self):
"""Return an iterable containing all relevant post ids"""
def get_metadata(self):
"""Return general metadata"""
return {}
def get_post_data(self, post_id):
"""Extract metadata of a single post"""
page = self.request("https://gelbooru.com/index.php?page=post&s=view"
"&id=" + post_id).text
data = text.extract_all(page, (
(None , '<meta name="keywords"', ''),
("tags" , ' imageboard, ', '"'),
("id" , '<li>Id: ', '<'),
("created_at", '<li>Posted: ', '<'),
("width" , '<li>Size: ', 'x'),
("height" , '', '<'),
("source" , '<li>Source: <a href="', '"'),
("rating" , '<li>Rating: ', '<'),
(None , '<li>Score: ', ''),
("score" , '>', '<'),
("file_url" , '<li><a href="http', '"'),
))[0]
data["file_url"] = "http" + data["file_url"]
data["md5"] = data["file_url"].rpartition("/")[2].partition(".")[0]
for key in ("id", "width", "height", "score"):
data[key] = util.safe_int(data[key])
return data
class GelbooruTagExtractor(GelbooruExtractor):
"""Extractor for images from gelbooru.com based on search-tags"""
subcategory = "tag"
directory_fmt = ["{category}", "{tags}"]
pattern = [r"(?:https?://)?(?:www\.)?gelbooru\.com/(?:index\.php)?"
r"\?page=post&s=list&tags=([^&]+)"]
test = [("http://gelbooru.com/index.php?page=post&s=list&tags=bonocho", {
"content": "b196fb9f1668109d7774a0a82efea3ffdda07746",
test = [("https://gelbooru.com/index.php?page=post&s=list&tags=bonocho", {
"count": 5,
})]
def __init__(self, match):
GelbooruExtractor.__init__(self)
self.tags = text.unquote(match.group(1).replace("+", " "))
# TODO: find out how to access pools via gelbooru-api
# class GelbooruPoolExtractor(GelbooruExtractor, booru.BooruPoolExtractor):
# """Extractor for image-pools from gelbooru.com"""
# pattern = [r"(?:https?://)?(?:www\.)?gelbooru\.com/(?:index\.php)?"
# r"\?page=pool&s=show&id=(\d+)"]
def get_metadata(self):
return {"tags": self.tags}
def posts(self):
url = "https://gelbooru.com/index.php?page=post&s=list"
params = {"tags": self.tags, "pid": 0}
class GelbooruPostExtractor(GelbooruExtractor, booru.BooruPostExtractor):
while True:
page = self.request(url, params=params).text
ids = list(text.extract_iter(page, '<a id="p', '"'))
yield from ids
if len(ids) < 42:
return
params["pid"] += 42
class GelbooruPoolExtractor(GelbooruExtractor):
"""Extractor for image-pools from gelbooru.com"""
subcategory = "pool"
directory_fmt = ["{category}", "pool", "{pool}"]
pattern = [r"(?:https?://)?(?:www\.)?gelbooru\.com/(?:index\.php)?"
r"\?page=pool&s=show&id=(\d+)"]
test = [("https://gelbooru.com/index.php?page=pool&s=show&id=761", {
"count": 6,
})]
def __init__(self, match):
GelbooruExtractor.__init__(self)
self.pool_id = match.group(1)
def get_metadata(self):
return {"pool": self.pool_id}
def posts(self):
page = self.request("https://gelbooru.com/index.php?page=pool&s=show"
"&id=" + self.pool_id).text
return text.extract_iter(page, 'id="p', '"')
class GelbooruPostExtractor(GelbooruExtractor):
"""Extractor for single images from gelbooru.com"""
subcategory = "post"
pattern = [r"(?:https?://)?(?:www\.)?gelbooru\.com/(?:index\.php)?"
r"\?page=post&s=view&id=(\d+)"]
test = [("http://gelbooru.com/index.php?page=post&s=view&id=313638", {
test = [("https://gelbooru.com/index.php?page=post&s=view&id=313638", {
"content": "5e255713cbf0a8e0801dc423563c34d896bb9229",
"count": 1,
})]
def __init__(self, match):
GelbooruExtractor.__init__(self)
self.post_id = match.group(1)
def posts(self):
return (self.post_id,)
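For reference, a minimal way to exercise the new pool extractor from Python would be through gallery-dl's job module (a sketch, assuming a checkout with this commit installed; the URL is the one from the pool test case above):

    from gallery_dl import job

    # queue and download every post in pool 761
    job.DownloadJob("https://gelbooru.com/index.php?page=pool&s=show&id=761").run()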

@@ -83,7 +83,7 @@ skip = [
"archivedmoe", "archiveofsins", "thebarchive",
# temporary issues
"mangazuki",
"yeet",
"batoto", # cloudflare "protection"
]
# enable selective testing for direct calls
if __name__ == '__main__' and len(sys.argv) > 1:
