From 5b0ed26ab546b6ff7092b2e099ba19fa20acc20f Mon Sep 17 00:00:00 2001
From: hartator
Date: Sat, 10 Jun 2017 15:54:15 -0500
Subject: [PATCH] Fix row align for GitHub repository preview

---
 README.md | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/README.md b/README.md
index 2eb3e81..2ceb7d3 100644
--- a/README.md
+++ b/README.md
@@ -26,26 +26,26 @@ It will download the last version of every file present on Wayback Machine to `.
 
 ## Advanced Usage
 
-    Usage: wayback_machine_downloader http://example.com
+Usage: wayback_machine_downloader http://example.com
 
-    Download an entire website from the Wayback Machine.
+Download an entire website from the Wayback Machine.
 
-    Optional options:
-    -d, --directory PATH         Directory to save the downloaded files into
-                                 Default is ./websites/ plus the domain name
-    -f, --from TIMESTAMP         Only files on or after timestamp supplied (ie. 20060716231334)
-    -t, --to TIMESTAMP           Only files on or before timestamp supplied (ie. 20100916231334)
-    -o, --only ONLY_FILTER       Restrict downloading to urls that match this filter
-                                 (use // notation for the filter to be treated as a regex)
-    -x, --exclude EXCLUDE_FILTER  Skip downloading of urls that match this filter
-                                 (use // notation for the filter to be treated as a regex)
-    -a, --all                    Expand downloading to error files (40x and 50x) and redirections (30x)
-    -c, --concurrency NUMBER     Number of multiple files to dowload at a time
-                                 Default is one file at a time (ie. 20)
-    -p, --snapshot-pages NUMBER  Maximum snapshot pages to consider (Default is 100)
-                                 Count an average of 150,000 snapshots per page
-    -l, --list                   Only list file urls in a JSON format with the archived timestamps, won't download anything.
-    -v, --version                Display version
+    Optional options:
+    -d, --directory PATH            Directory to save the downloaded files into
+                                    Default is ./websites/ plus the domain name
+    -f, --from TIMESTAMP            Only files on or after timestamp supplied (ie. 20060716231334)
+    -t, --to TIMESTAMP              Only files on or before timestamp supplied (ie. 20100916231334)
+    -o, --only ONLY_FILTER          Restrict downloading to urls that match this filter
+                                    (use // notation for the filter to be treated as a regex)
+    -x, --exclude EXCLUDE_FILTER    Skip downloading of urls that match this filter
+                                    (use // notation for the filter to be treated as a regex)
+    -a, --all                       Expand downloading to error files (40x and 50x) and redirections (30x)
+    -c, --concurrency NUMBER        Number of multiple files to dowload at a time
+                                    Default is one file at a time (ie. 20)
+    -p, --maximum-snapshot NUMBER   Maximum snapshot pages to consider (Default is 100)
+                                    Count an average of 150,000 snapshots per page
+    -l, --list                      Only list file urls in a JSON format with the archived timestamps, won't download anything.
+    -v, --version                   Display version
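
For reference, a usage sketch assembled only from the options documented in the hunk above; example.com, the filter regex, and the concurrency value are illustrative placeholders (the timestamps are the ones shown in the option descriptions):

    wayback_machine_downloader http://example.com --from 20060716231334 --to 20100916231334 --only "/\.(html|css)$/" --concurrency 20
    wayback_machine_downloader http://example.com --list

The first command restricts the download to snapshots between the two timestamps and to urls matching the regex filter, fetching 20 files at a time; the second only lists the matching file urls as JSON with their archived timestamps, without downloading anything.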