gallery-dl/gallery_dl/extractor/nijie.py


# -*- coding: utf-8 -*-

# Copyright 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from https://nijie.info/"""

from .common import AsynchronousExtractor, Message
from .. import config, text
import re


class NijieExtractor(AsynchronousExtractor):

    category = "nijie"
    directory_fmt = ["{category}", "{artist-id}"]
    filename_fmt = "{category}_{artist-id}_{image-id}_p{index:>02}.{extension}"
    pattern = [r"(?:https?://)?(?:www\.)?nijie\.info/"
               r"members(?:_illust)?\.php\?id=(\d+)"]
    popup_url = "https://nijie.info/view_popup.php?id="

    def __init__(self, match):
        AsynchronousExtractor.__init__(self)
        self.artist_id = match.group(1)
        self.artist_url = (
            "https://nijie.info/members_illust.php?id="
            + self.artist_id
        )
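        # nijie appears to require a matching Referer header and the "R18"
        # cookie (adult-content opt-in) to serve its image pages; cookies
        # from the config (e.g. a login session) are merged in as well.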
        self.session.headers["Referer"] = self.artist_url
        self.session.cookies["R18"] = "1"
        self.session.cookies["nijie_referer"] = "nijie.info"
        self.session.cookies.update(
            # fall back to an empty dict to avoid a TypeError
            # when no cookies are configured
            config.get(("extractor", self.category, "cookies")) or {}
        )
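
    # items() implements gallery_dl's message protocol: a Version message,
    # a Directory message carrying the job metadata, then one Url message
    # per image with the merged metadata dict.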
    def items(self):
        data = self.get_job_metadata()
        yield Message.Version, 1
        yield Message.Directory, data
        for image_id in self.get_image_ids():
            for image_url, image_data in self.get_image_data(image_id):
                image_data.update(data)
                yield Message.Url, image_url, image_data

    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        return {
            "category": self.category,
            "artist-id": self.artist_id,
        }
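
    # Note: only the first page of the member gallery is requested below;
    # this version does not handle pagination.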
    def get_image_ids(self):
        """Collect all image-ids for a specific artist"""
        page = self.request(self.artist_url).text
        regex = r'<a href="/view\.php\?id=(\d+)"'
        return [m.group(1) for m in re.finditer(regex, page)]
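
    # The view_popup page seems to list every image of a post, so a
    # multi-image post yields several URLs; text.nameext_from_url() derives
    # the file name and extension from each URL and adds them to the
    # metadata dict.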
    def get_image_data(self, image_id):
        """Get URL and metadata for images specified by 'image_id'"""
        page = self.request(self.popup_url + image_id).text
        matches = re.findall('<img src="([^"]+)"', page)
        for index, url in enumerate(matches):
            yield "https:" + url, text.nameext_from_url(url, {
                "count": len(matches),
                "index": index,
                "image-id": image_id,
            })