[tsumino] add login capabilities (#161)

pull/170/head
Mike Fährmann 6 years ago
parent dd358b4564
commit bfbbac4495

docs/configuration.rst
@@ -156,7 +156,7 @@ Some extractors require you to provide valid login-credentials in the form of
 a username & password pair.
 This is necessary for ``pixiv``, ``nijie`` and ``seiga``
 and optional (but strongly recommended) for ``exhentai``, ``luscious``,
-``sankaku``, ``idolcomplex`` and ``wallhaven``.
+``sankaku``, ``idolcomplex``, ``tsumino`` and ``wallhaven``.
 You can set the necessary information in your configuration file
 (cf. gallery-dl.conf_)
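The ``tsumino`` credentials follow the same ``extractor.<site>.username`` / ``extractor.<site>.password`` scheme as the other sites, so a gallery-dl.conf entry would look roughly like this (values are placeholders):

    {
        "extractor": {
            "tsumino": {
                "username": "<your username>",
                "password": "<your password>"
            }
        }
    }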

docs/supportedsites.rst
@@ -83,7 +83,7 @@ Simply Hentai          https://www.simply-hentai.com/ Galleries, individual I
 SlideShare             https://www.slideshare.net/    Presentations
 SmugMug                https://www.smugmug.com/       |Capabilities-8|                              Optional (OAuth)
 The /b/ Archive        https://thebarchive.com/       Threads
-Tsumino                https://www.tsumino.com/       Galleries
+Tsumino                https://www.tsumino.com/       Galleries                                     Optional
 Tumblr                 https://www.tumblr.com/        Images from Users, Likes, Posts, Tag-Searches Optional (OAuth)
 Twitter                https://twitter.com/           Media Timelines, Timelines, Tweets
 Wallhaven              https://alpha.wallhaven.cc/    individual Images, Search Results             Optional

gallery_dl/extractor/common.py
@@ -223,6 +223,7 @@ class ChapterExtractor(Extractor):
         self.url = url
 
     def items(self):
+        self.login()
         page = self.request(self.url).text
         data = self.get_metadata(page)
         imgs = self.get_images(page)
@@ -230,7 +231,7 @@ class ChapterExtractor(Extractor):
         if "count" in data:
             images = zip(
                 range(1, data["count"]+1),
-                imgs
+                imgs,
             )
         else:
             try:
@@ -246,6 +247,9 @@ class ChapterExtractor(Extractor):
             data.update(imgdata)
             yield Message.Url, url, text.nameext_from_url(url, data)
 
+    def login(self):
+        """Login and set necessary cookies"""
+
     def get_metadata(self, page):
         """Return a dict with general metadata"""

gallery_dl/extractor/tsumino.py
@@ -9,7 +9,8 @@
 """Extractors for https://www.tsumino.com/"""
 
 from .common import ChapterExtractor
-from .. import text
+from .. import text, exception
+from ..cache import cache
 
 
 class TsuminoGalleryExtractor(ChapterExtractor):
@@ -19,6 +20,7 @@ class TsuminoGalleryExtractor(ChapterExtractor):
     filename_fmt = "{category}_{gallery_id}_{page:>03}.{extension}"
     directory_fmt = ["{category}", "{gallery_id} {title}"]
     archive_fmt = "{gallery_id}_{page}"
+    cookiedomain = "www.tsumino.com"
     pattern = [r"(?i)(?:https?://)?(?:www\.)?tsumino\.com"
                r"/(?:Book/Info|Read/View)/(\d+)"]
     test = [
@@ -35,8 +37,25 @@ class TsuminoGalleryExtractor(ChapterExtractor):
         url = "{}/Book/Info/{}".format(self.root, self.gallery_id)
         ChapterExtractor.__init__(self, url)
-        self.session.cookies.setdefault(
-            "ASP.NET_SessionId", "x1drgggilez4cpkttneukrc5")
+
+    def login(self):
+        username, password = self._get_auth_info()
+        if username:
+            self._update_cookies(self._login_impl(username, password))
+        else:
+            self.session.cookies.setdefault(
+                "ASP.NET_SessionId", "x1drgggilez4cpkttneukrc5")
+
+    @cache(maxage=14*24*60*60, keyarg=1)
+    def _login_impl(self, username, password):
+        self.log.info("Logging in as %s", username)
+        url = "{}/Account/Login".format(self.root)
+        headers = {"Referer": url}
+        data = {"Username": username, "Password": password}
+
+        response = self.request(
+            url, method="POST", headers=headers, data=data)
+        if not response.history:
+            raise exception.AuthenticationError()
+        return {".aotsumino": response.history[0].cookies[".aotsumino"]}
 
     def get_metadata(self, page):
         extr = text.extract
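Two details of the login flow are worth noting. ``_login_impl`` is wrapped in ``@cache(maxage=14*24*60*60, keyarg=1)``, so the ``.aotsumino`` cookie is cached per username and reused for up to 14 days before another login request is sent. The success check relies on the site answering a correct login POST with a redirect: the auth cookie is then found on the first response in ``response.history``, while a failed login just re-renders the form without redirecting. A standalone sketch of that same check, assuming this site behaviour and using plain requests (credentials are placeholders):

    import requests

    session = requests.Session()
    url = "https://www.tsumino.com/Account/Login"
    data = {"Username": "<user>", "Password": "<pass>"}

    # requests follows the redirect; the original redirect response
    # ends up in .history
    response = session.post(url, headers={"Referer": url}, data=data)
    if response.history:
        # the auth cookie was set by the redirecting response
        print("logged in:", response.history[0].cookies.get(".aotsumino"))
    else:
        print("login failed")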

scripts/supportedsites.py
@@ -100,6 +100,7 @@ AUTH_MAP = {
     "sankaku"   : "Optional",
     "seiga"     : "Required",
     "smugmug"   : "Optional (OAuth)",
+    "tsumino"   : "Optional",
     "tumblr"    : "Optional (OAuth)",
     "wallhaven" : "Optional",
 }
