code cleanup to use nameext_from_url

pull/13/head
Mike Fährmann 9 years ago
parent ca523b9f64
commit c2f0720184
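The cleanup itself is mechanical: every extractor that derived "name" and "extension" by hand via os.path.splitext(text.filename_from_url(...)) now delegates to a single text.nameext_from_url() helper. A minimal sketch of what that helper presumably does, inferred from the call sites in this diff (the dict is filled in place and returned; the unquoting step is inferred from the text.unquote() calls being removed) -- not the actual gallery-dl implementation:

import os.path
import urllib.parse

def nameext_from_url(url, data=None):
    # Sketch reconstructed from how this commit uses the helper.
    if data is None:
        data = {}
    # last path segment, without query string, percent-decoded
    filename = urllib.parse.unquote(url.partition("?")[0].rpartition("/")[2])
    name, ext = os.path.splitext(filename)
    data["name"] = name
    data["extension"] = ext[1:]  # drop the leading "."
    return data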

@@ -44,14 +44,11 @@ class BatotoExtractor(AsynchronousExtractor):
         page = self.request(self.url, params=params).text
         data = self.get_job_metadata(page)
         yield Message.Version, 1
-        yield Message.Directory, data
+        yield Message.Directory, data.copy()
         for i in range(int(data["count"])):
             next_url, image_url = self.get_page_urls(page)
-            filename = text.unquote(text.filename_from_url(image_url))
-            name, ext = os.path.splitext(filename)
+            text.nameext_from_url(image_url, data)
             data["page"] = i+1
-            data["name"] = name
-            data["extension"] = ext[1:]
             yield Message.Url, image_url, data.copy()
             if next_url:
                 params["p"] += 1
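Besides the helper call, the Directory message in this hunk now yields data.copy(): the same dict object is mutated on every loop iteration, so a consumer that kept a reference to the un-copied dict would later see the values of a different page. A short illustration of the aliasing the copies avoid:

data = {"page": 1}
alias = data                # what a consumer holds without .copy()
data["page"] = 2
print(alias["page"])        # 2 -- the earlier message changed retroactively
print(data.copy() is data)  # False -- a shallow copy stays independent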

@@ -59,13 +59,7 @@ class BooruExtractor(Extractor):
     def get_file_metadata(self, data):
         """Collect metadata for a downloadable file"""
         data["category"] = self.info["category"]
-        data["filename"] = text.unquote(
-            text.filename_from_url(self.get_file_url(data))
-        )
-        name, ext = os.path.splitext(data["filename"])
-        data["name"] = name
-        data["extension"] = ext[1:]
-        return data
+        return text.nameext_from_url(self.get_file_url(data), data)
 
     def get_file_url(self, data):
         """Extract download-url from 'data'"""

@@ -85,17 +85,15 @@ class DeviantArtExtractor(AsynchronousExtractor):
         url   , pos = text.extract(image, ' data-super-img="', '"', pos)
         width , pos = text.extract(image, ' data-super-width="', '"', pos)
         height, pos = text.extract(image, ' data-super-height="', '"', pos)
-        name, ext = os.path.splitext(text.filename_from_url(url))
-        return url, {
+        data = {
             "index": index,
             "title": match.group(1),
             "artist": match.group(2),
             "date": match.group(3),
             "width": width,
             "height": height,
-            "name": name,
-            "extension": ext[1:],
         }
+        return url, text.nameext_from_url(url, data)
 
     @staticmethod
     def extract_data(txt, attr, pattern):

@@ -58,10 +58,8 @@ class ExhentaiExtractor(Extractor):
         urlkey = "origurl"
         for num, image in enumerate(self.get_images(url), 1):
             image.update(data)
-            name, ext = os.path.splitext(text.filename_from_url(image["url"]))
             image["num"] = num
-            image["name"] = name
-            image["extension"] = ext[1:]
+            text.nameext_from_url(image["url"], image)
             if "/fullimg.php" in image[urlkey]:
                 time.sleep(random.uniform(1, 2))
             yield Message.Url, image[urlkey], image

@@ -36,10 +36,8 @@ class HbrowseExtractor(Extractor):
         yield Message.Version, 1
         yield Message.Directory, data
         for num, url in enumerate(self.get_image_urls(page), 1):
-            name, ext = os.path.splitext(text.filename_from_url(url))
-            data["name"] = name
-            data["extension"] = ext[1:]
             data["num"] = num
+            text.nameext_from_url(url, data)
             yield Message.Url, url, data
 
     def get_job_metadata(self, page):

@@ -70,14 +70,12 @@ class HentaiFoundryExtractor(Extractor):
         page = self.request(url).text
         index = text.extract(url, '/', '/', len(self.url_base) + len(self.artist))[0]
         title, pos = text.extract(page, 'Pictures</a> &raquo; <span>', '<')
-        url  , pos = text.extract(page, '//pictures.hentai-foundry.com', '"', pos)#
-        name, ext = os.path.splitext(text.filename_from_url(url))
-        return "http://pictures.hentai-foundry.com" + url, {
+        url  , pos = text.extract(page, '//pictures.hentai-foundry.com', '"', pos)
+        data = {
             "index": index,
             "title": text.unescape(title),
-            "name": name,
-            "extension": ext[1:],
         }
+        return "http://pictures.hentai-foundry.com" + url, text.nameext_from_url(url, data)
 
     def set_filters(self, token):
         """Set site-internal filters to show all images"""

@@ -37,11 +37,8 @@ class HitomiExtractor(Extractor):
         yield Message.Version, 1
         yield Message.Directory, data
         for num, url in enumerate(images, 1):
-            name, ext = os.path.splitext(text.filename_from_url(url))
             data["num"] = num
-            data["name"] = name
-            data["extension"] = ext[1:]
-            yield Message.Url, url, data
+            yield Message.Url, url, text.nameext_from_url(url, data)
 
     def get_job_metadata(self, page):
         """Collect metadata for extractor-job"""

@@ -37,11 +37,8 @@ class ImagebamExtractor(AsynchronousExtractor):
         yield Message.Directory, data
         for image_url, image_id in self.get_images(data["first-url"]):
             data["id"] = image_id
-            data["filename"] = text.unquote(text.filename_from_url(image_url))
-            name, ext = os.path.splitext(data["filename"])
             data["num"] += 1
-            data["name"] = name
-            data["extension"] = ext[1:]
+            text.nameext_from_url(image_url, data)
             yield Message.Url, image_url, data.copy()
 
     def get_job_metadata(self):

@@ -35,12 +35,8 @@ class ImgthExtractor(Extractor):
         yield Message.Version, 1
         yield Message.Directory, data
         for num, url in enumerate(self.get_images(page), 1):
-            name, ext = os.path.splitext(text.filename_from_url(url))
             data["num"] = num
-            data["name"] = name
-            data["extension"] = ext[1:]
-            yield Message.Url, url, data
+            yield Message.Url, url, text.nameext_from_url(url, data)
 
     def get_images(self, page):
         pnum = 0
         while True:

@@ -38,12 +38,8 @@ class KissmangaExtractor(Extractor):
         yield Message.Version, 1
         yield Message.Directory, data
         for num, url in enumerate(imgs, 1):
-            filename = text.unquote(text.filename_from_url(url))
-            name, ext = os.path.splitext(filename)
             data["page"] = num
-            data["name"] = name
-            data["extension"] = ext[1:]
-            yield Message.Url, url, data
+            yield Message.Url, url, text.nameext_from_url(url, data)
 
     @staticmethod
     def get_job_metadata(page):

@@ -91,12 +91,7 @@ class MangaReaderExtractor(AsynchronousExtractor):
         width , pos = extr(page, '<img id="img" width="', '"', pos)
         height, pos = extr(page, ' height="', '"', pos)
         image, pos = extr(page, ' src="', '"', pos)
-        filename = text.unquote(text.filename_from_url(image))
-        name, ext = os.path.splitext(filename)
-        return self.url_base + url, image, {
+        return self.url_base + url, image, text.nameext_from_url(image, {
             "width": width,
             "height": height,
-            "name": name,
-            "extension": ext[1:],
-        }
+        })
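The return-the-dict convention also lets a dict literal be passed directly as the second argument, as here and in the nijie and sankaku hunks below; the literal is created, filled, and returned in a single expression. With the sketch above (hypothetical URL):

info = nameext_from_url("http://i.example.net/ch-12/007.png", {
    "width": "800",
    "height": "1200",
})
# info now also contains "name": "007" and "extension": "png"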

@@ -34,12 +34,10 @@ class MangaShareExtractor(AsynchronousExtractor):
         page = self.request(self.url_fmt.format(self.part, 1)).text
         data = self.get_job_metadata(page)
         yield Message.Version, 1
-        yield Message.Directory, data
+        yield Message.Directory, data.copy()
         for i, url in zip(range(int(data["count"])), (self.get_image_urls(page))):
-            name, ext = os.path.splitext(text.filename_from_url(url))
-            data["name"] = name
-            data["extension"] = ext[1:]
             data["page"] = i+1
+            text.nameext_from_url(url, data)
             yield Message.Url, url, data.copy()
 
     @staticmethod

@@ -40,11 +40,8 @@ class MangaStreamExtractor(AsynchronousExtractor):
             if next_url:
                 page = self.request(next_url).text
             next_url, image_url = self.get_page_metadata(page)
-            filename = text.unquote(text.filename_from_url(image_url))
-            name, ext = os.path.splitext(filename)
+            text.nameext_from_url(image_url, data)
             data["page"] = i+1
-            data["name"] = name
-            data["extension"] = ext[1:]
             yield Message.Url, image_url, data.copy()
 
     def get_job_metadata(self, page):

@@ -67,10 +67,8 @@ class NijieExtractor(AsynchronousExtractor):
         page = self.request(self.popup_url + image_id).text
         matches = re.findall('<img src="([^"]+)"', page)
         for index, url in enumerate(matches):
-            yield "https:" + url, {
+            yield "https:" + url, text.nameext_from_url(url, {
                 "count": len(matches),
                 "index": index,
                 "image-id": image_id,
-                "name"     : text.filename_from_url(url),
-                "extension": url[url.rfind(".")+1:],
-            }
+            })
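Note a small behavior change in this hunk: the removed code stored the complete filename (extension included) under "name", while the shared helper stores only the stem, bringing nijie in line with the other extractors. With a hypothetical URL:

url = "//nijie.example/picture/123_p0.png"   # hypothetical
# removed code:  "name" -> "123_p0.png", "extension" -> "png"
# after this commit (nameext_from_url): "name" -> "123_p0", "extension" -> "png"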

@@ -76,14 +76,11 @@ class SankakuExtractor(AsynchronousExtractor):
         image_url, pos = text.extract(page, '<li>Original: <a href="', '"')
         width    , pos = text.extract(page, '>', 'x', pos)
         height   , pos = text.extract(page, '', ' ', pos)
-        filename = text.filename_from_url(image_url)
-        name, ext = os.path.splitext(filename)
-        return {
+        data = text.nameext_from_url(image_url, {
             "id": image_id,
             "file-url": "https:" + image_url,
             "width": width,
             "height": height,
-            "md5": name,
-            "name": name,
-            "extension": ext[1:],
-        }
+        })
+        data["md5"] = data["name"]
+        return data
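The removed "md5": name line suggests sankaku's original-image filenames are the file's MD5 hash (an assumption here, but consistent with that line), so after the helper runs, "md5" can simply be copied from the freshly filled "name" field:

# hypothetical URL; the filename stem is the hash itself
data = nameext_from_url(
    "//cs.example.com/data/d4/1d/d41d8cd98f00b204e9800998ecf8427e.jpg",
    {"id": "12345"})
data["md5"] = data["name"]   # same value the removed splitext() code produced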

@@ -41,10 +41,8 @@ class SpectrumNexusExtractor(AsynchronousExtractor):
         count = int(data["count"])
         for i in range(1, count+1):
             url = self.get_image_url(page)
-            name, ext = os.path.splitext(text.filename_from_url(url))
+            text.nameext_from_url(url, data)
             data["page"] = i
-            data["name"] = name
-            data["extension"] = ext[1:]
             yield Message.Url, url, data.copy()
             if i < count:
                 params["page"] += 1
