diff --git a/archive.py b/archive.py
index 52a41f33..0969e52e 100755
--- a/archive.py
+++ b/archive.py
@@ -12,23 +12,65 @@ import json
from datetime import datetime
import time
-from subprocess import run, DEVNULL
+from subprocess import run, PIPE, DEVNULL
+
+
+### SETTINGS
INDEX_TEMPLATE = 'index_template.html'
FETCH_WGET = True
FETCH_PDF = True
FETCH_SCREENSHOT = True
+RESOLUTION = '1440,900' # screenshot resolution
FETCH_FAVICON = True
-RESOLUTION = '1440,900'
+SUBMIT_ARCHIVE_DOT_ORG = True
+
+CHROME_BINARY = 'google-chrome' # change to 'chromium-browser' if using Chromium
+WGET_BINARY = 'wget'
+
def check_dependencies():
- for dependency in ('google-chrome', 'wget'):
- if run(['which', dependency]).returncode:
- print('[X] Missing dependency: {}'.format(dependency))
+ print('[*] Checking Dependencies:')
+ if FETCH_PDF or FETCH_SCREENSHOT:
+ if run(['which', CHROME_BINARY]).returncode:
+ print('[X] Missing dependency: {}'.format(CHROME_BINARY))
print(' See https://github.com/pirate/pocket-archive-stream for help.')
raise SystemExit(1)
+ # parse chrome --version e.g. Google Chrome 61.0.3114.0 canary / Chromium 59.0.3029.110 built on Ubuntu, running on Ubuntu 16.04
+ result = run([CHROME_BINARY, '--version'], stdout=PIPE)
+ version = result.stdout.decode('utf-8').replace('Google Chrome ', '').replace('Chromium ', '').split(' ', 1)[0].split('.', 1)[0] # TODO: regex might be better
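+ # what the chained parse above extracts (hypothetical version strings):
+ #   'Google Chrome 61.0.3114.0 canary' -> '61'
+ #   'Chromium 59.0.3029.110 built on Ubuntu, running on Ubuntu 16.04' -> '59'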
+ if int(version) < 59:
+ print('[X] Chrome version must be 59 or greater for headless PDF and screenshot saving')
+ print(' See https://github.com/pirate/pocket-archive-stream for help.')
+ raise SystemExit(1)
+
+ if FETCH_WGET:
+ if run(['which', WGET_BINARY]).returncode:
+ print('[X] Missing dependency: {}'.format(WGET_BINARY))
+ print(' See https://github.com/pirate/pocket-archive-stream for help.')
+ raise SystemExit(1)
+
+ if FETCH_FAVICON or SUBMIT_ARCHIVE_DOT_ORG:
+ if run(['which', 'curl']).returncode:
+ print('[X] Missing dependency: curl')
+ print(' See https://github.com/pirate/pocket-archive-stream for help.')
+ raise SystemExit(1)
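+ # note: `which <binary>` exits nonzero when the binary isn't on $PATH, which is
+ # what the returncode checks above rely on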
+
+
+### PARSING READER LIST EXPORTS
+
+def get_link_type(link):
+ if link['base_url'].endswith('.pdf'):
+ return 'PDF'
+ elif link['base_url'].rsplit('.', 1)[-1] in ('pdf', 'png', 'jpg', 'jpeg', 'svg', 'bmp', 'gif', 'tiff', 'webp'):
+ return 'image'
+ elif 'wikipedia.org' in link['domain']:
+ return 'wiki'
+ elif 'youtube.com' in link['domain']:
+ return 'youtube'
+ return None
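+
+# rough sketch of the mapping, with made-up example links:
+#   {'base_url': 'example.com/paper.pdf', 'domain': 'example.com'} -> 'PDF'
+#   {'base_url': 'example.com/cat.jpg', 'domain': 'example.com'} -> 'image'
+#   {'base_url': 'en.wikipedia.org/wiki/Foo', 'domain': 'en.wikipedia.org'} -> 'wiki'
+#   anything else -> None (treated as a plain web page)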
def parse_pocket_export(html):
pattern = re.compile("^\\s*<li><a href=\"(.+)\" time_added=\"(\\d+)\" tags=\"(.*)\">(.+)</a></li>", re.UNICODE) # see sample input in ./example_ril_export.html
@@ -37,22 +79,24 @@ def parse_pocket_export(html):
if match:
fixed_url = match.group(1).replace('http://www.readability.com/read?url=', '') # remove old readability prefixes to get original url
without_scheme = fixed_url.replace('http://', '').replace('https://', '')
- yield {
+ info = {
'url': fixed_url,
'domain': without_scheme.split('/')[0], # without pathname
'base_url': without_scheme.split('?')[0], # without query args
- 'time': datetime.fromtimestamp(int(match.group(2))),
+ 'time': datetime.fromtimestamp(int(match.group(2))).strftime('%Y-%m-%d %H:%M'),
'timestamp': match.group(2),
'tags': match.group(3),
'title': match.group(4).replace(' — Readability', '').replace('http://www.readability.com/read?url=', '') or without_scheme,
}
+ info['type'] = get_link_type(info)
+ yield info
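+ # each yielded link looks roughly like this (values are hypothetical):
+ #   {'url': 'https://example.com/post', 'domain': 'example.com', 'base_url': 'example.com/post',
+ #    'time': '2017-08-01 12:00', 'timestamp': '1501600000', 'tags': '', 'title': 'Example Post', 'type': None}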
def parse_pinboard_export(html):
json_content = json.load(html)
for line in json_content:
if line:
erg = line
- yield {
+ info = {
'url': erg['href'],
'domain': erg['href'].replace('http://', '').replace('https://', '').split('/')[0],
'base_url': erg['href'].replace('https://', '').replace('http://', '').split('?')[0],
@@ -61,29 +105,11 @@ def parse_pinboard_export(html):
'tags': erg['tags'],
'title': erg['description'].replace(' — Readability', ''),
}
+ info['type'] = get_link_type(info)
+ yield info
-def dump_index(links, service):
- with open(INDEX_TEMPLATE, 'r') as f:
- index_html = f.read()
- link_html = """\
- <tr>
-     <td>{time}</td>
-     <td>
-         <a href="archive/{timestamp}/" title="{title}">
-             <img src="archive/{timestamp}/favicon.ico"> {title}
-         </a>
-     </td>
-     <td><a href="archive/{timestamp}/" title="Files">📂</a></td>
-     <td><a href="archive/{timestamp}/output.pdf" title="PDF">📄</a></td>
-     <td><a href="archive/{timestamp}/screenshot.png" title="Screenshot">🖼</a></td>
-     <td>🔗 <a href="{url}">{url}</a></td>
- </tr>"""
-
- with open(''.join((service,'/index.html')), 'w') as f:
- article_rows = '\n'.join(
- link_html.format(**link) for link in links
- )
- f.write(index_html.format(datetime.now().strftime('%Y-%m-%d %H:%M'), article_rows))
+### ARCHIVING FUNCTIONS
def fetch_wget(out_dir, link, overwrite=False):
# download full site
@@ -104,9 +130,9 @@ def fetch_pdf(out_dir, link, overwrite=False):
# download PDF
if (not os.path.exists('{}/output.pdf'.format(out_dir)) or overwrite) and not link['base_url'].endswith('.pdf'):
print(' - Printing PDF')
- CMD = 'google-chrome --headless --disable-gpu --print-to-pdf'.split(' ')
+ chrome_args = '--headless --disable-gpu --print-to-pdf'.split(' ')
try:
- run([*CMD, link['url']], stdout=DEVNULL, stderr=DEVNULL, cwd=out_dir, timeout=20) # output.pdf
+ run([CHROME_BINARY, *chrome_args, link['url']], stdout=DEVNULL, stderr=DEVNULL, cwd=out_dir, timeout=20) # output.pdf
except Exception as e:
print(' Exception: {}'.format(e.__class__.__name__))
else:
@@ -116,14 +142,41 @@ def fetch_screenshot(out_dir, link, overwrite=False):
# take screenshot
if (not os.path.exists('{}/screenshot.png'.format(out_dir)) or overwrite) and not link['base_url'].endswith('.pdf'):
print(' - Snapping Screenshot')
- CMD = 'google-chrome --headless --disable-gpu --screenshot'.split(' ')
+ chrome_args = '--headless --disable-gpu --screenshot'.split(' ')
try:
- run([*CMD, '--window-size={}'.format(RESOLUTION), link['url']], stdout=DEVNULL, stderr=DEVNULL, cwd=out_dir, timeout=20) # sreenshot.png
+ run([CHROME_BINARY, *chrome_args, '--window-size={}'.format(RESOLUTION), link['url']], stdout=DEVNULL, stderr=DEVNULL, cwd=out_dir, timeout=20) # screenshot.png
except Exception as e:
print(' Exception: {}'.format(e.__class__.__name__))
else:
print(' √ Skipping screenshot')
+def archive_dot_org(out_dir, link, overwrite=False):
+ # submit to archive.org
+ if (not os.path.exists('{}/archive.org.txt'.format(out_dir)) or overwrite):
+ print(' - Submitting to archive.org')
+ submit_url = 'https://web.archive.org/save/{}'.format(link['base_url'])
+
+ success = False
+ try:
+ result = run(['curl', '-I', submit_url], stdout=PIPE, stderr=DEVNULL, cwd=out_dir, timeout=20) # archive.org
+ headers = result.stdout.splitlines()
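+ # curl -I performs a HEAD request and prints only the response headers; a
+ # successful save replies with a header like (hypothetical example):
+ #   Content-Location: /web/20170801120000/https://example.com/post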
+ content_location = [h for h in headers if b'Content-Location: ' in h]
+ if content_location:
+ archive_path = content_location[0].split(b'Content-Location: ', 1)[-1].decode('utf-8')
+ saved_url = 'https://web.archive.org{}'.format(archive_path)
+ success = True
+ else:
+ raise Exception('Failed to find Content-Location URL in Archive.org response headers.')
+ except Exception as e:
+ print(' Exception: {}'.format(e.__class__.__name__))
+
+ if success:
+ with open('{}/archive.org.txt'.format(out_dir), 'w') as f:
+ f.write(saved_url)
+
+ else:
+ print(' √ Skipping archive.org')
+
def fetch_favicon(out_dir, link, overwrite=False):
# download favicon
if not os.path.exists('{}/favicon.ico'.format(out_dir)) or overwrite:
@@ -139,21 +192,62 @@ def fetch_favicon(out_dir, link, overwrite=False):
print(' √ Skipping favicon')
+### ORCHESTRATION
+
+def dump_index(links, service):
+ with open(INDEX_TEMPLATE, 'r') as f:
+ index_html = f.read()
+
+ link_html = """\
+ <tr>
+     <td>{time}</td>
+     <td>
+         <a href="archive/{timestamp}/" title="{title}">
+             <img src="archive/{timestamp}/favicon.ico"> {title} <small>{tags}</small>
+         </a>
+     </td>
+     <td><a href="archive/{timestamp}/" title="Files">📂</a></td>
+     <td><a href="{pdf_link}" title="PDF">📄</a></td>
+     <td><a href="{screenshot_link}" title="Screenshot">🖼</a></td>
+     <td><a href="https://web.archive.org/web/{base_url}" title="Archive.org">🏛</a></td>
+     <td>🔗 <a href="{url}">{url}</a></td>
+ </tr>"""
+
+ def get_template_vars(link):
+ # since we don't screenshot or PDF-print links that are themselves images or PDFs, point those links at the wget'ed file instead
+ link_info = {**link}
+
+ if link['type']:
+ link_info.update({'title': '{title} ({type})'.format(**link)})
+
+ if link['type'] in ('PDF', 'image'):
+ link_info.update({
+ 'pdf_link': 'archive/{timestamp}/{base_url}'.format(**link),
+ 'screenshot_link': 'archive/{timestamp}/{base_url}'.format(**link),
+ })
+ else:
+ link_info.update({
+ 'pdf_link': 'archive/{timestamp}/output.pdf'.format(**link),
+ 'screenshot_link': 'archive/{timestamp}/screenshot.png'.format(**link)
+ })
+ return link_info
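+ # e.g. (hypothetical) an 'image' link with base_url 'example.com/cat.jpg' gets pdf_link and
+ # screenshot_link both pointed at 'archive/{timestamp}/example.com/cat.jpg', the wget'ed file itself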
+
+ with open(''.join((service, '/index.html')), 'w') as f:
+ article_rows = '\n'.join(
+ link_html.format(**get_template_vars(link)) for link in links
+ )
+ f.write(index_html.format(datetime.now().strftime('%Y-%m-%d %H:%M'), article_rows))
+
def dump_website(link, service, overwrite=False):
"""download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""
- print('[+] [{time}] Archiving "{title}": {url}'.format(**link))
+ print('[+] [{time}] Archiving "{title}": {base_url}'.format(**link))
out_dir = ''.join((service, '/archive/{timestamp}')).format(**link)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
- if link['base_url'].endswith('.pdf'):
- print(' i PDF File')
- elif 'youtube.com' in link['domain']:
- print(' i Youtube Video')
- elif 'wikipedia.org' in link['domain']:
- print(' i Wikipedia Article')
+ if link['type']:
+ print(' i Type: {}'.format(link['type']))
if FETCH_WGET:
fetch_wget(out_dir, link, overwrite=overwrite)
@@ -164,21 +258,22 @@ def dump_website(link, service, overwrite=False):
if FETCH_SCREENSHOT:
fetch_screenshot(out_dir, link, overwrite=overwrite)
+ if SUBMIT_ARCHIVE_DOT_ORG:
+ archive_dot_org(out_dir, link, overwrite=overwrite)
+
if FETCH_FAVICON:
fetch_favicon(out_dir, link, overwrite=overwrite)
- run(['chmod', '-R', '755', out_dir], timeout=1)
-
-def create_archive(service_file, service, resume=None):
- print('[+] [{}] Starting {} archive from {}'.format(datetime.now(), service, service_file))
+def create_archive(export_file, service, resume=None):
+ print('[+] [{}] Starting {} archive from {}'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), service, export_file))
if not os.path.exists(service):
os.makedirs(service)
- if not os.path.exists(''.join((service,'/archive'))):
- os.makedirs(''.join((service,'/archive')))
+ if not os.path.exists(''.join((service, '/archive'))):
+ os.makedirs(''.join((service, '/archive')))
- with open(service_file, 'r', encoding='utf-8') as f:
+ with open(export_file, 'r', encoding='utf-8') as f:
if service == "pocket":
links = parse_pocket_export(f)
elif service == "pinboard":
@@ -188,17 +283,17 @@ def create_archive(service_file, service, resume=None):
links = [link for link in links if link['timestamp'] >= resume]
if not links:
- if service == "pocket":
- print('[X] No links found in {}, is it a getpocket.com/export export?'.format(service_file))
- elif service == "pinboard":
- print ('[X] No links found in {}, is it a pinboard.in/export/format:json/ export?'.format(service_file))
+ if service == 'pocket':
+ print('[X] No links found in {}, is it a getpocket.com/export export?'.format(export_file))
+ elif service == 'pinboard':
+ print('[X] No links found in {}, is it a pinboard.in/export/format:json/ export?'.format(export_file))
raise SystemExit(1)
dump_index(links, service)
run(['chmod', '-R', '755', service], timeout=1)
- print('[*] [{}] Created archive index.'.format(datetime.now()))
+ print('[*] [{}] Created archive index.'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
check_dependencies()
@@ -208,11 +303,11 @@ def create_archive(service_file, service, resume=None):
print('[√] [{}] Archive complete.'.format(datetime.now()))
-if __name__ == '__main__':
- service_file = 'ril_export.html'
- argc = len(sys.argv)
- service_file = sys.argv[1] if argc > 1 else "ril_export.html" # path to export file
- service = sys.argv[2] if argc > 2 else "pocket" # select service for file format select
- resume = sys.argv[3] if argc > 3 else None # timestamp to resume dowloading from
- create_archive(service_file, service, resume=resume)
+if __name__ == '__main__':
+ argc = len(sys.argv)
+ export_file = sys.argv[1] if argc > 1 else "ril_export.html" # path to export file
+ export_type = sys.argv[2] if argc > 2 else "pocket" # export format: 'pocket' or 'pinboard'
+ resume_from = sys.argv[3] if argc > 3 else None # timestamp to resume downloading from
+
+ create_archive(export_file, export_type, resume=resume_from)
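+
+ # usage sketches (file names and timestamp are hypothetical):
+ #   ./archive.py # pocket export at ./ril_export.html
+ #   ./archive.py pinboard_export.json pinboard # pinboard JSON export
+ #   ./archive.py ril_export.html pocket 1498288226 # resume from a saved timestamp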
diff --git a/index_template.html b/index_template.html
index 8ff48137..3570dd64 100644
--- a/index_template.html
+++ b/index_template.html
@@ -76,11 +76,12 @@
<th>Pocketed Date</th>
<th>Saved Article</th>
<th>Files</th>
<th>PDF</th>
<th>Screenshot</th>
+ <th>A.org</th>
<th>Original URL</th>