[behance] add gallery extractor (#95)

pull/133/head
Mike Fährmann 6 years ago
parent c83fc62abc
commit df082e923c
No known key found for this signature in database
GPG Key ID: 5680CA389D365A88

@ -12,6 +12,7 @@ arch.b4k.co https://arch.b4k.co/ Threads
Archive of Sins https://archiveofsins.com/ Threads
Archived.Moe https://archived.moe/ Threads
ArtStation https://www.artstation.com/ |Images from Use-0|
Behance https://www.behance.net/ Galleries
Danbooru https://danbooru.donmai.us/ Pools, Popular Images, Posts, Tag-Searches
Desuarchive https://desuarchive.org/ Threads
DeviantArt https://www.deviantart.com/ |Collections, De-1| Optional (OAuth)

@ -19,6 +19,7 @@ modules = [
"archiveofsins",
"artstation",
"b4k",
"behance",
"danbooru",
"desuarchive",
"deviantart",

@ -0,0 +1,111 @@
# -*- coding: utf-8 -*-
# Copyright 2018 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract images from https://www.behance.net/"""
from .common import Extractor, Message
from .. import text
class BehanceGalleryExtractor(Extractor):
    """Extractor for image galleries from www.behance.net"""
    category = "behance"
    subcategory = "gallery"
    directory_fmt = ["{category}", "{user}", "{gallery_id} {title}"]
    filename_fmt = "{category}_{gallery_id}_{num:>02}.{extension}"
    archive_fmt = "{gallery_id}_{num}"
    root = "https://www.behance.net"
    pattern = [r"(?:https?://)?(?:www\.)?behance\.net/gallery/(\d+)"]
    test = [
        ("https://www.behance.net/gallery/17386197", {
            "count": 2,
            "keyword": {
                "title": str,
                "user": "Place Studio, Julio César Velazquez",
                "fields": ["Animation", "Character Design", "Directing"],
                "date": 1401810111,
                "views": int,
                "votes": int,
                "comments": int,
            },
        }),
    ]

    def __init__(self, match):
        Extractor.__init__(self)
        # numeric gallery ID captured by the URL pattern
        self.gallery_id = match.group(1)

    def items(self):
        """Yield extractor messages for every image in the gallery."""
        page_url = "{}/gallery/{}/a".format(self.root, self.gallery_id)
        # 'ilo0' cookie appears to opt out of an interstitial — TODO confirm
        page = self.request(page_url, cookies={"ilo0": "true"}).text

        data = self.get_metadata(page)
        entries = self.get_images(page)
        data["count"] = len(entries)

        yield Message.Version, 1
        yield Message.Directory, data
        for num, (url, external) in enumerate(entries, 1):
            data["num"] = num
            if external:
                # externally hosted media (e.g. embedded players) are queued
                yield Message.Queue, url, data
            else:
                yield Message.Url, url, text.nameext_from_url(url, data)

    def get_metadata(self, page):
        """Collect metadata for extractor-job"""
        extr = text.extract
        # the extraction markers are chained via 'pos' and therefore
        # depend on their order of appearance in the page
        users, pos = extr(
            page, 'class="project-owner-info ', 'class="project-owner-actions')
        title, pos = extr(
            page, '<div class="project-title">', '</div>', pos)
        fields, pos = extr(
            page, '<ul id="project-fields-list">', '</ul>', pos)
        stats, pos = extr(
            page, '<div id="project-stats">', 'Published', pos)
        date, pos = extr(
            page, ' data-timestamp="', '"', pos)

        stats = text.split_html(stats)
        return {
            "gallery_id": text.parse_int(self.gallery_id),
            "title": text.unescape(title),
            "user": ", ".join(self._parse_userinfo(users)),
            "fields": text.split_html(fields),
            "date": text.parse_int(date),
            "views": text.parse_int(stats[0]),
            "votes": text.parse_int(stats[1]),
            "comments": text.parse_int(stats[2]),
        }

    @staticmethod
    def get_images(page):
        """Extract and return a list of all image- and external urls"""
        urls = []
        for slide in text.extract_iter(
                page, "js-lightbox-slide-content", "<a "):
            srcset = text.extract(slide, 'srcset="', '"')[0]
            if srcset:
                # take the last srcset candidate and drop its size descriptor
                image = srcset.rstrip(",").rpartition(",")[2].partition(" ")[0]
                urls.append((image, False))
            elif "<iframe " in slide:
                embed = text.extract(slide, ' src="', '"')[0]
                urls.append((text.unescape(embed), True))
        return urls

    @staticmethod
    def _parse_userinfo(users):
        """Return the gallery owner name(s) as an iterable of strings."""
        if not users.startswith("multiple"):
            # single owner: name is the tail of the anchor element
            owner = text.extract(
                users, ' class="profile-list-name"', '</a>')[0]
            return (owner.rpartition(">")[2],)
        # multiple owners: one profile-item block per user
        return [
            text.remove_html(item)
            for item in text.extract_iter(
                users, '<div class="rf-profile-item__info">', '</a>',
            )
        ]
Loading…
Cancel
Save