simplify 'docs/options.md' generation

use the plain text output from '--help'
instead of trying to generate 'fancy' HTML with Markdown restrictions
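In effect, the generator now just renders argparse's plain '--help' text and lightly post-processes it. A minimal sketch of that idea (mirroring the new scripts/options.py further down in this diff; the exact whitespace in the indent replacement is collapsed by this rendering, so the two-space/four-space strings below are assumed):

    import re

    from gallery_dl import option

    parser = option.build_parser()
    parser.usage = ""                               # reduce the usage banner to a stub

    opts = parser.format_help()
    opts = opts[8:]                                 # strip the leading 'usage: ' line
    opts = re.sub(r"(?m)^(\w+.*)", "## \\1", opts)  # option-group names become headings
    opts = opts.replace("\n  ", "\n    ")           # re-indent option lines by 4 (assumed widths)

    print(opts)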
pull/3503/head
Mike Fährmann 2 years ago
parent 9695c4e88d
commit 3c03928d75
GPG Key ID: 5680CA389D365A88

docs/options.md
@@ -3,442 +3,119 @@
<!-- auto-generated by scripts/options.py -->
## General Options
<table>
<tbody valign="top">
<tr>
<td><code>&#8209;h</code></td>
<td><code>&#8209;&#8209;help</code></td>
<td></td>
<td>Print this help message and exit</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;version</code></td>
<td></td>
<td>Print program version and exit</td>
</tr>
<tr>
<td><code>&#8209;i</code></td>
<td><code>&#8209;&#8209;input&#8209;file</code></td>
<td><code>FILE</code></td>
<td>Download URLs found in FILE (<code>-</code> for stdin). More than one --input-file can be specified</td>
</tr>
<tr>
<td><code>&#8209;d</code></td>
<td><code>&#8209;&#8209;destination</code></td>
<td><code>PATH</code></td>
<td>Target location for file downloads</td>
</tr>
<tr>
<td><code>&#8209;D</code></td>
<td><code>&#8209;&#8209;directory</code></td>
<td><code>PATH</code></td>
<td>Exact location for file downloads</td>
</tr>
<tr>
<td><code>&#8209;f</code></td>
<td><code>&#8209;&#8209;filename</code></td>
<td><code>FORMAT</code></td>
<td>Filename format string for downloaded files (<code>/O</code> for "original" filenames)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;proxy</code></td>
<td><code>URL</code></td>
<td>Use the specified proxy</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;source&#8209;address</code></td>
<td><code>IP</code></td>
<td>Client-side IP address to bind to</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;user&#8209;agent</code></td>
<td><code>UA</code></td>
<td>User-Agent request header</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;clear&#8209;cache</code></td>
<td><code>MODULE</code></td>
<td>Delete cached login sessions, cookies, etc. for MODULE (ALL to delete everything)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;cookies</code></td>
<td><code>FILE</code></td>
<td>File to load additional cookies from</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;cookies&#8209;from&#8209;browser</code></td>
<td><code>BROWSER</code></td>
<td>Name of the browser to load cookies from, with optional keyring name prefixed with <code>+</code>, profile prefixed with <code>:</code>, and container prefixed with <code>::</code> (<code>none</code> for no container)</td>
</tr>
</tbody>
</table>
## Output Options
<table>
<tbody valign="top">
<tr>
<td><code>&#8209;q</code></td>
<td><code>&#8209;&#8209;quiet</code></td>
<td></td>
<td>Activate quiet mode</td>
</tr>
<tr>
<td><code>&#8209;v</code></td>
<td><code>&#8209;&#8209;verbose</code></td>
<td></td>
<td>Print various debugging information</td>
</tr>
<tr>
<td><code>&#8209;g</code></td>
<td><code>&#8209;&#8209;get&#8209;urls</code></td>
<td></td>
<td>Print URLs instead of downloading</td>
</tr>
<tr>
<td><code>&#8209;G</code></td>
<td><code>&#8209;&#8209;resolve&#8209;urls</code></td>
<td></td>
<td>Print URLs instead of downloading; resolve intermediary URLs</td>
</tr>
<tr>
<td><code>&#8209;j</code></td>
<td><code>&#8209;&#8209;dump&#8209;json</code></td>
<td></td>
<td>Print JSON information</td>
</tr>
<tr>
<td><code>&#8209;s</code></td>
<td><code>&#8209;&#8209;simulate</code></td>
<td></td>
<td>Simulate data extraction; do not download anything</td>
</tr>
<tr>
<td><code>&#8209;E</code></td>
<td><code>&#8209;&#8209;extractor&#8209;info</code></td>
<td></td>
<td>Print extractor defaults and settings</td>
</tr>
<tr>
<td><code>&#8209;K</code></td>
<td><code>&#8209;&#8209;list&#8209;keywords</code></td>
<td></td>
<td>Print a list of available keywords and example values for the given URLs</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;list&#8209;modules</code></td>
<td></td>
<td>Print a list of available extractor modules</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;list&#8209;extractors</code></td>
<td></td>
<td>Print a list of extractor classes with description, (sub)category and example URL</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;write&#8209;log</code></td>
<td><code>FILE</code></td>
<td>Write logging output to FILE</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;write&#8209;unsupported</code></td>
<td><code>FILE</code></td>
<td>Write URLs, which get emitted by other extractors but cannot be handled, to FILE</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;write&#8209;pages</code></td>
<td></td>
<td>Write downloaded intermediary pages to files in the current directory to debug problems</td>
</tr>
</tbody>
</table>
## Downloader Options
<table>
<tbody valign="top">
<tr>
<td><code>&#8209;r</code></td>
<td><code>&#8209;&#8209;limit&#8209;rate</code></td>
<td><code>RATE</code></td>
<td>Maximum download rate (e.g. 500k or 2.5M)</td>
</tr>
<tr>
<td><code>&#8209;R</code></td>
<td><code>&#8209;&#8209;retries</code></td>
<td><code>N</code></td>
<td>Maximum number of retries for failed HTTP requests or -1 for infinite retries (default: 4)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;http&#8209;timeout</code></td>
<td><code>SECONDS</code></td>
<td>Timeout for HTTP connections (default: 30.0)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;sleep</code></td>
<td><code>SECONDS</code></td>
<td>Number of seconds to wait before each download. This can be either a constant value or a range (e.g. 2.7 or 2.0-3.5)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;sleep&#8209;request</code></td>
<td><code>SECONDS</code></td>
<td>Number of seconds to wait between HTTP requests during data extraction</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;sleep&#8209;extractor</code></td>
<td><code>SECONDS</code></td>
<td>Number of seconds to wait before starting data extraction for an input URL</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;filesize&#8209;min</code></td>
<td><code>SIZE</code></td>
<td>Do not download files smaller than SIZE (e.g. 500k or 2.5M)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;filesize&#8209;max</code></td>
<td><code>SIZE</code></td>
<td>Do not download files larger than SIZE (e.g. 500k or 2.5M)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;chunk&#8209;size</code></td>
<td><code>SIZE</code></td>
<td>Size of in-memory data chunks (default: 32k)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;no&#8209;part</code></td>
<td></td>
<td>Do not use .part files</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;no&#8209;skip</code></td>
<td></td>
<td>Do not skip downloads; overwrite existing files</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;no&#8209;mtime</code></td>
<td></td>
<td>Do not set file modification times according to Last-Modified HTTP response headers</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;no&#8209;download</code></td>
<td></td>
<td>Do not download any files</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;no&#8209;postprocessors</code></td>
<td></td>
<td>Do not run any post processors</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;no&#8209;check&#8209;certificate</code></td>
<td></td>
<td>Disable HTTPS certificate validation</td>
</tr>
</tbody>
</table>
## Configuration Options
<table>
<tbody valign="top">
<tr>
<td><code>&#8209;c</code></td>
<td><code>&#8209;&#8209;config</code></td>
<td><code>FILE</code></td>
<td>Additional configuration files</td>
</tr>
<tr>
<td><code>&#8209;o</code></td>
<td><code>&#8209;&#8209;option</code></td>
<td><code>OPT</code></td>
<td>Additional <code>&lt;key&gt;=&lt;value&gt;</code> option values</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;ignore&#8209;config</code></td>
<td></td>
<td>Do not read default configuration files</td>
</tr>
</tbody>
</table>
## Authentication Options
<table>
<tbody valign="top">
<tr>
<td><code>&#8209;u</code></td>
<td><code>&#8209;&#8209;username</code></td>
<td><code>USER</code></td>
<td>Username to login with</td>
</tr>
<tr>
<td><code>&#8209;p</code></td>
<td><code>&#8209;&#8209;password</code></td>
<td><code>PASS</code></td>
<td>Password belonging to the given username</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;netrc</code></td>
<td></td>
<td>Enable .netrc authentication data</td>
</tr>
</tbody>
</table>
## Selection Options
<table>
<tbody valign="top">
<tr>
<td></td>
<td><code>&#8209;&#8209;download&#8209;archive</code></td>
<td><code>FILE</code></td>
<td>Record all downloaded or skipped files in FILE and skip downloading any file already in it</td>
</tr>
<tr>
<td><code>&#8209;A</code></td>
<td><code>&#8209;&#8209;abort</code></td>
<td><code>N</code></td>
<td>Stop current extractor run after N consecutive file downloads were skipped</td>
</tr>
<tr>
<td><code>&#8209;T</code></td>
<td><code>&#8209;&#8209;terminate</code></td>
<td><code>N</code></td>
<td>Stop current and parent extractor runs after N consecutive file downloads were skipped</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;range</code></td>
<td><code>RANGE</code></td>
<td>Index range(s) specifying which files to download. These can be either a constant value, range, or slice (e.g. <code>5</code>, <code>8-20</code>, or <code>1:24:3</code>)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;chapter&#8209;range</code></td>
<td><code>RANGE</code></td>
<td>Like <code>--range</code>, but applies to manga chapters and other delegated URLs</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;filter</code></td>
<td><code>EXPR</code></td>
<td>Python expression controlling which files to download. Files for which the expression evaluates to False are ignored. Available keys are the filename-specific ones listed by <code>-K</code>. <br />Example: --filter "image_width &gt;= 1000 and rating in ('s', 'q')"</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;chapter&#8209;filter</code></td>
<td><code>EXPR</code></td>
<td>Like <code>--filter</code>, but applies to manga chapters and other delegated URLs</td>
</tr>
</tbody>
</table>
## Post-processing Options
<table>
<tbody valign="top">
<tr>
<td></td>
<td><code>&#8209;&#8209;zip</code></td>
<td></td>
<td>Store downloaded files in a ZIP archive</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;ugoira&#8209;conv</code></td>
<td></td>
<td>Convert Pixiv Ugoira to WebM (requires FFmpeg)</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;ugoira&#8209;conv&#8209;lossless</code></td>
<td></td>
<td>Convert Pixiv Ugoira to WebM in VP9 lossless mode</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;ugoira&#8209;conv&#8209;copy</code></td>
<td></td>
<td>Convert Pixiv Ugoira to MKV without re-encoding any frames</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;write&#8209;metadata</code></td>
<td></td>
<td>Write metadata to separate JSON files</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;write&#8209;info&#8209;json</code></td>
<td></td>
<td>Write gallery metadata to a info.json file</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;write&#8209;tags</code></td>
<td></td>
<td>Write image tags to separate text files</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;mtime&#8209;from&#8209;date</code></td>
<td></td>
<td>Set file modification times according to <code>date</code> metadata</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;exec</code></td>
<td><code>CMD</code></td>
<td>Execute CMD for each downloaded file. <br />Example: --exec "convert {} {}.png && rm {}"</td>
</tr>
<tr>
<td></td>
<td><code>&#8209;&#8209;exec&#8209;after</code></td>
<td><code>CMD</code></td>
<td>Execute CMD after all files were downloaded successfully. <br />Example: --exec-after "cd {} && convert * ../doc.pdf"</td>
</tr>
<tr>
<td><code>&#8209;P</code></td>
<td><code>&#8209;&#8209;postprocessor</code></td>
<td><code>NAME</code></td>
<td>Activate the specified post processor</td>
</tr>
</tbody>
</table>
## General Options:
-h, --help Print this help message and exit
--version Print program version and exit
-i, --input-file FILE Download URLs found in FILE ('-' for stdin).
More than one --input-file can be specified
-d, --destination PATH Target location for file downloads
-D, --directory PATH Exact location for file downloads
-f, --filename FORMAT Filename format string for downloaded files
('/O' for "original" filenames)
--proxy URL Use the specified proxy
--source-address IP Client-side IP address to bind to
--user-agent UA User-Agent request header
--clear-cache MODULE Delete cached login sessions, cookies, etc. for
MODULE (ALL to delete everything)
--cookies FILE File to load additional cookies from
--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]
Name of the browser to load cookies from, with
optional keyring name prefixed with '+', profile
prefixed with ':', and container prefixed with
'::' ('none' for no container)
## Output Options:
-q, --quiet Activate quiet mode
-v, --verbose Print various debugging information
-g, --get-urls Print URLs instead of downloading
-G, --resolve-urls Print URLs instead of downloading; resolve
intermediary URLs
-j, --dump-json Print JSON information
-s, --simulate Simulate data extraction; do not download
anything
-E, --extractor-info Print extractor defaults and settings
-K, --list-keywords Print a list of available keywords and example
values for the given URLs
--list-modules Print a list of available extractor modules
--list-extractors Print a list of extractor classes with
description, (sub)category and example URL
--write-log FILE Write logging output to FILE
--write-unsupported FILE Write URLs, which get emitted by other
extractors but cannot be handled, to FILE
--write-pages Write downloaded intermediary pages to files in
the current directory to debug problems
## Downloader Options:
-r, --limit-rate RATE Maximum download rate (e.g. 500k or 2.5M)
-R, --retries N Maximum number of retries for failed HTTP
requests or -1 for infinite retries (default: 4)
--http-timeout SECONDS Timeout for HTTP connections (default: 30.0)
--sleep SECONDS Number of seconds to wait before each download.
This can be either a constant value or a range
(e.g. 2.7 or 2.0-3.5)
--sleep-request SECONDS Number of seconds to wait between HTTP requests
during data extraction
--sleep-extractor SECONDS Number of seconds to wait before starting data
extraction for an input URL
--filesize-min SIZE Do not download files smaller than SIZE (e.g.
500k or 2.5M)
--filesize-max SIZE Do not download files larger than SIZE (e.g.
500k or 2.5M)
--chunk-size SIZE Size of in-memory data chunks (default: 32k)
--no-part Do not use .part files
--no-skip Do not skip downloads; overwrite existing files
--no-mtime Do not set file modification times according to
Last-Modified HTTP response headers
--no-download Do not download any files
--no-postprocessors Do not run any post processors
--no-check-certificate Disable HTTPS certificate validation
## Configuration Options:
-c, --config FILE Additional configuration files
-o, --option OPT Additional '<key>=<value>' option values
--ignore-config Do not read default configuration files
## Authentication Options:
-u, --username USER Username to login with
-p, --password PASS Password belonging to the given username
--netrc Enable .netrc authentication data
## Selection Options:
--download-archive FILE Record all downloaded or skipped files in FILE
and skip downloading any file already in it
-A, --abort N Stop current extractor run after N consecutive
file downloads were skipped
-T, --terminate N Stop current and parent extractor runs after N
consecutive file downloads were skipped
--range RANGE Index range(s) specifying which files to
download. These can be either a constant value,
range, or slice (e.g. '5', '8-20', or '1:24:3')
--chapter-range RANGE Like '--range', but applies to manga chapters
and other delegated URLs
--filter EXPR Python expression controlling which files to
download. Files for which the expression
evaluates to False are ignored. Available keys
are the filename-specific ones listed by '-K'.
Example: --filter "image_width >= 1000 and
rating in ('s', 'q')"
--chapter-filter EXPR Like '--filter', but applies to manga chapters
and other delegated URLs
## Post-processing Options:
--zip Store downloaded files in a ZIP archive
--ugoira-conv Convert Pixiv Ugoira to WebM (requires FFmpeg)
--ugoira-conv-lossless Convert Pixiv Ugoira to WebM in VP9 lossless
mode
--ugoira-conv-copy Convert Pixiv Ugoira to MKV without re-encoding
any frames
--write-metadata Write metadata to separate JSON files
--write-info-json Write gallery metadata to a info.json file
--write-tags Write image tags to separate text files
--mtime-from-date Set file modification times according to 'date'
metadata
--exec CMD Execute CMD for each downloaded file. Example:
--exec "convert {} {}.png && rm {}"
--exec-after CMD Execute CMD after all files were downloaded
successfully. Example: --exec-after "cd {} &&
convert * ../doc.pdf"
-P, --postprocessor NAME Activate the specified post processor

scripts/options.py
@@ -7,11 +7,11 @@
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Generate a Markdown document listing gallery-dl's command-line arguments"""
"""Generate a document listing gallery-dl's command-line arguments"""
import os
import re
from argparse import SUPPRESS
import sys
import util
from gallery_dl import option
@@ -21,71 +21,22 @@ TEMPLATE = """# Command-Line Options
<!-- auto-generated by {} -->
{}
"""
{}"""
TABLE = """
## {}
<table>
<tbody valign="top">
{}
</tbody>
</table>
""".format
parser = option.build_parser()
parser.usage = ""
ROW = """<tr>
<td>{}</td>
<td>{}</td>
<td>{}</td>
<td>{}</td>
</tr>""".format
opts = parser.format_help()
opts = opts[8:] # strip 'usage'
opts = re.sub(r"(?m)^(\w+.*)", "## \\1", opts) # group names to headings
opts = opts.replace("\n  ", "\n    ")  # indent by 4
tables = []
sub = re.compile(r"'([^']+)'").sub
nb_hyphen = "&#8209;" # non-breaking hyphen
outfile = sys.argv[1] if len(sys.argv) > 1 else util.path("docs", "options.md")
with open(outfile, "w", encoding="utf-8") as fp:
for group in option.build_parser()._action_groups[2:]:
tbody = []
for action in group._group_actions:
help = action.help
if help == SUPPRESS:
continue
meta = action.metavar or ""
try:
short, long = action.option_strings
except ValueError:
try:
long = action.option_strings[0]
except IndexError:
continue
short = ""
if short:
short = "<code>" + short.replace("-", nb_hyphen) + "</code>"
if long:
long = "<code>" + long.replace("-", nb_hyphen) + "</code>"
if meta:
meta = "<code>" + meta.partition("[")[0] + "</code>"
if help:
help = help.replace("<", "&lt;").replace(">", "&gt;")
if "Example: " in help:
help, sep, example = help.partition("Example: ")
help = sub("<code>\\1</code>", help) + "<br />" + sep + example
else:
help = sub("<code>\\1</code>", help)
tbody.append(ROW(short, long, meta, help))
tables.append(TABLE(group.title, "\n".join(tbody)))
with open(util.path("docs", "options.md"), "w", encoding="utf-8") as fp:
fp.write(TEMPLATE.format(
"/".join(os.path.normpath(__file__).split(os.sep)[-2:]),
"\n".join(tables),
opts,
))
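Going by the sys.argv handling above, invocation should be unchanged: running the script with no argument writes docs/options.md (resolved via util.path), and an optional first argument overrides the output path, e.g.

    python scripts/options.py
    python scripts/options.py /tmp/options.md

(How the project's build scripts actually call it is not shown in this diff, so this is inferred from the code.)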
