[batoto] remove module

Site officially shut down on 2018.01.18
pull/79/head
Mike Fährmann 7 years ago
parent 27fce6f600
commit 1e93955170
GPG Key ID: 5680CA389D365A88

@@ -112,7 +112,6 @@ Supported Sites
* pixiv.net
* seiga.nicovideo.jp
* nijie.info
-* bato.to
* mangastream.com
* kissmanga.com
* readcomiconline.to
@@ -160,7 +159,7 @@ Username & Password
Some extractors require you to provide valid login-credentials in the form of
a username & password pair.
This is necessary for ``pixiv``, ``nijie`` and ``seiga`` and optional
-(but strongly recommended) for ``exhentai``, ``batoto`` and ``sankaku``.
+(but strongly recommended) for ``exhentai`` and ``sankaku``.
You can set the necessary information in your configuration file
(cf. gallery-dl.conf_)
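
As an aside (not part of this commit), the credentials go under the ``extractor.<site>.username`` / ``extractor.<site>.password`` keys of gallery-dl.conf. The snippet below is a minimal sketch that writes such a file from Python; the site names, placeholder values and the config path are examples only, not recommendations:

import json
import os

# Example credential entries; replace site names and values as needed.
config = {
    "extractor": {
        "exhentai": {"username": "your-username", "password": "your-password"},
        "sankaku":  {"username": "your-username", "password": "your-password"},
    }
}

# One of the locations gallery-dl checks for its configuration file.
path = os.path.expanduser("~/.config/gallery-dl/config.json")
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w") as file:
    json.dump(config, file, indent=4)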

@@ -10,7 +10,6 @@ Site URL Capabilities
arch.b4k.co      https://arch.b4k.co/         Threads
Archive of Sins  https://archiveofsins.com/   Threads
Archived.Moe     https://archived.moe/        Threads
-Batoto          https://bato.to/             Chapters, Manga                              Optional
Danbooru         https://danbooru.donmai.us/  Pools, Popular Images, Posts, Tag-Searches
Desuarchive      https://desuarchive.org/     Threads
DeviantArt       https://www.deviantart.com/  |Collections, De-1|                          Optional (OAuth)

@@ -18,7 +18,6 @@ modules = [
"archivedmoe",
"archiveofsins",
"b4k",
"batoto",
"danbooru",
"desuarchive",
"deviantart",

@@ -1,217 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright 2014-2017 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract manga chapters from https://bato.to/"""

from .common import MangaExtractor, AsynchronousExtractor, Message
from .. import text, util, cloudflare, exception
from ..cache import cache
import re


class BatotoExtractor():
    """Base class for batoto extractors"""
    category = "batoto"
    scheme = "https"
    root = "https://bato.to"
    cookienames = ("member_id", "pass_hash")
    cookiedomain = ".bato.to"
    request = cloudflare.request_func

    def login(self):
        """Login and set necessary cookies"""
        if self._check_cookies(self.cookienames):
            return
        username, password = self._get_auth_info()
        if username:
            cookies = self._login_impl(username, password)
            for key, value in cookies.items():
                self.session.cookies.set(
                    key, value, domain=self.cookiedomain)

    @cache(maxage=7*24*60*60, keyarg=1)
    def _login_impl(self, username, password):
        """Actual login implementation"""
        self.log.info("Logging in as %s", username)
        page = self.request(self.root).text
        auth = text.extract(page, "name='auth_key' value='", "'")[0]
        params = {
            "app": "core",
            "module": "global",
            "section": "login",
            "do": "process",
        }
        data = {
            "auth_key": auth,
            "referer": self.root,
            "ips_username": username,
            "ips_password": password,
            "rememberMe": "1",
            "anonymous": "1",
        }
        response = self.session.post(self.root + "/forums/index.php",
                                     params=params, data=data)
        if "Sign In - " in response.text:
            raise exception.AuthenticationError()
        return {c: response.cookies[c] for c in self.cookienames}

    @staticmethod
    def parse_chapter_string(data):
        """Parse 'chapter_string' value contained in 'data'"""
        data["chapter_string"] = text.unescape(data["chapter_string"])
        pattern = r"(?:Vol\.(\d+) )?Ch\.([\d\w]+)([^ :]*)(?::? (.+))?"
        match = re.match(pattern, data["chapter_string"])
        volume, chapter, data["chapter_minor"], title = match.groups()
        data["volume"] = util.safe_int(volume)
        data["chapter"] = util.safe_int(chapter, chapter)
        data["title"] = title if title and title != "Read Online" else ""
        return data


class BatotoMangaExtractor(BatotoExtractor, MangaExtractor):
    """Extractor for manga from bato.to"""
    pattern = [r"(?:https?://)?(?:www\.)?(bato\.to"
               r"/comic/_(?:/comics)?/[^/?&#]*-r\d+)"]
    test = [
        ("http://bato.to/comic/_/comics/aria-r2007", {
            "url": "a38585b0339587666d772ee06f2a60abdbf42a97",
            "keyword": "c33ea7b97e3714530384e2411fae62ae51aae50d",
        }),
        # non-numeric chapter ("Extra")
        ("https://bato.to/comic/_/comics/cosplay-deka-r2563", {
            "count": ">= 37",
        }),
        # short URL
        ("https://bato.to/comic/_/aria-r2007", None),
    ]

    def chapters(self, page):
        pos = 0
        results = []
        page = text.extract(
            page, '<h3 class="maintitle">Chapters</h3>', '</tbody>')[0]
        while True:
            data, pos = text.extract_all(page, (
                ("language"      , '<tr class="row lang_', ' '),
                ("token"         , '/reader#', '"'),
                ("chapter_string", 'title="', ' | Sort: '),
                (None            , '<a href="https://bato.to/group/', ''),
                ("group"         , '>', '<'),
                (None            , '<a href="https://bato.to/forums/user/', ''),
                ("contributor"   , '>', '<'),
            ), pos)
            if not data["token"]:
                return results
            self.parse_chapter_string(data)
            data["lang"] = util.language_to_code(data["language"])
            data["group"] = text.unescape(data["group"])
            data["contributor"] = text.unescape(data["contributor"])
            url = self.root + "/reader#" + data["token"]
            results.append((url, data))


class BatotoChapterExtractor(BatotoExtractor, AsynchronousExtractor):
    """Extractor for manga-chapters from bato.to"""
    subcategory = "chapter"
    directory_fmt = [
        "{category}", "{manga}",
        "{volume:?v/ />02}c{chapter:>03}{chapter_minor}{title:?: //}"]
    filename_fmt = (
        "{manga}_c{chapter:>03}{chapter_minor}_{page:>03}.{extension}")
    pattern = [r"(?:https?://)?(?:www\.)?bato\.to/reader#([0-9a-f]+)"]
    test = [
        ("http://bato.to/reader#459878c8fda07502", {
            "url": "432d7958506ad913b0a9e42664a89e46a63e9296",
            "keyword": "96598b6f94d2b26d11c2780f8173cd6ab5fe9906",
        }),
        ("http://bato.to/reader#459878c8fda07502", {  # error 10030
            "exception": exception.AuthorizationError,
            "options": (("username", None),),
        }),
        ("https://bato.to/reader#528e7d7c4b1db6ff", {  # error 10031
            "exception": exception.AuthorizationError,
        }),
        ("http://bato.to/reader#459878c8fda07503", {  # error 10020
            "exception": exception.NotFoundError,
        }),
    ]
    reader_url = "https://bato.to/areader"

    def __init__(self, match):
        super().__init__()
        self.token = match.group(1)

    def items(self):
        self.login()
        self.session.headers.update({
            "X-Requested-With": "XMLHttpRequest",
            "Referer": self.root + "/reader",
        })
        params = {
            "id": self.token,
            "p": 1,
            "supress_webtoon": "t",
        }
        response = self.request(self.reader_url, params=params)
        if response.status_code == 405:
            error = text.extract(response.text, "ERROR [", "]")[0]
            if error in ("10030", "10031"):
                raise exception.AuthorizationError()
            elif error == "10020":
                raise exception.NotFoundError("chapter")
            else:
                raise Exception("error code: " + error)
        page = response.text
        data = self.get_job_metadata(page)
        yield Message.Version, 1
        yield Message.Directory, data.copy()
        for data["page"] in range(1, data["count"]+1):
            next_url, image_url = self.get_page_urls(page)
            text.nameext_from_url(image_url, data)
            yield Message.Url, image_url, data.copy()
            if next_url:
                params["p"] += 1
                page = self.request(self.reader_url, params=params).text

    def get_job_metadata(self, page):
        """Collect metadata for extractor-job"""
        extr = text.extract
        _    , pos = extr(page, '<select name="chapter_select"', '')
        cinfo, pos = extr(page, 'selected="selected">', '</option>', pos)
        _    , pos = extr(page, '<select name="group_select"', '', pos)
        group, pos = extr(page, 'selected="selected">', ' - ', pos)
        lang , pos = extr(page, '', '</option>', pos)
        _    , pos = extr(page, '<select name="page_select"', '', pos)
        _    , pos = extr(page, '</select>', '', pos)
        count, pos = extr(page, '>page ', '<', pos-35)
        manga, pos = extr(page, "document.title = '", " - ", pos)
        data = {
            "token": self.token,
            "manga": text.unescape(manga),
            "chapter_string": cinfo,
            "group": text.unescape(group),
            "lang": util.language_to_code(lang),
            "language": lang,
            "count": util.safe_int(count),
        }
        return self.parse_chapter_string(data)

    @staticmethod
    def get_page_urls(page):
        """Collect next- and image-url for one manga-page"""
        _   , pos = text.extract(page, 'title="Next Chapter"', '')
        nurl, pos = text.extract(page, '<a href="', '"', pos)
        _   , pos = text.extract(page, '<div id="full_image"', '', pos)
        iurl, pos = text.extract(page, '<img src="', '"', pos)
        return nurl if "_" in nurl else None, iurl

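For reference, the chapter-string parsing used by the removed module can be exercised on its own. The following is a minimal, self-contained sketch of that logic; the sample string and the simplified stand-in for util.safe_int are assumptions for illustration, while the regular expression is the one from parse_chapter_string above:

import re

# Same pattern as BatotoExtractor.parse_chapter_string above.
PATTERN = r"(?:Vol\.(\d+) )?Ch\.([\d\w]+)([^ :]*)(?::? (.+))?"

def parse_chapter_string(chapter_string):
    # Split "Vol.X Ch.Y<minor>: Title" into its parts.
    volume, chapter, minor, title = re.match(PATTERN, chapter_string).groups()
    return {
        "volume": int(volume) if volume else 0,
        "chapter": int(chapter) if chapter and chapter.isdigit() else chapter,
        "chapter_minor": minor,
        "title": title if title and title != "Read Online" else "",
    }

print(parse_chapter_string("Vol.3 Ch.21.5: Example Title"))
# {'volume': 3, 'chapter': 21, 'chapter_minor': '.5', 'title': 'Example Title'}
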
@@ -90,7 +90,7 @@ class TestCookiedict(unittest.TestCase):
        self.assertEqual(sorted(cookies.values()), sorted(self.cdict.values()))

    def test_domain(self):
-        for category in ["batoto", "exhentai", "nijie", "sankaku", "seiga"]:
+        for category in ["exhentai", "nijie", "sankaku", "seiga"]:
            extr = _get_extractor(category)
            cookies = extr.session.cookies
            for key in self.cdict.keys():
@@ -106,7 +106,6 @@ class TestCookieLogin(unittest.TestCase):
    def test_cookie_login(self):
        extr_cookies = {
-            "batoto": ("member_id", "pass_hash"),
            "exhentai": ("ipb_member_id", "ipb_pass_hash"),
            "nijie": ("nemail", "nlogin"),
            "sankaku": ("login", "pass_hash"),
