mirror of
https://github.com/ArchiveBox/ArchiveBox.git
synced 2025-05-14 15:14:31 -04:00
add plain text link parsing
This commit is contained in:
parent
7a9487fad9
commit
cf9d1875c7
2 changed files with 55 additions and 10 deletions
|
@ -19,6 +19,7 @@ Parsed link schema: {
|
||||||
|
|
||||||
import re
|
import re
|
||||||
import json
|
import json
|
||||||
|
import urllib
|
||||||
import xml.etree.ElementTree as etree
|
import xml.etree.ElementTree as etree
|
||||||
|
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
@ -28,6 +29,8 @@ from util import (
|
||||||
base_url,
|
base_url,
|
||||||
str_between,
|
str_between,
|
||||||
get_link_type,
|
get_link_type,
|
||||||
|
fetch_page_title,
|
||||||
|
URL_REGEX,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -41,6 +44,7 @@ def get_parsers(file):
|
||||||
'rss': parse_rss_export,
|
'rss': parse_rss_export,
|
||||||
'pinboard_rss': parse_pinboard_rss_feed,
|
'pinboard_rss': parse_pinboard_rss_feed,
|
||||||
'medium_rss': parse_medium_rss_feed,
|
'medium_rss': parse_medium_rss_feed,
|
||||||
|
'plain_text': parse_plain_text,
|
||||||
}
|
}
|
||||||
|
|
||||||
def parse_links(path):
|
def parse_links(path):
|
||||||
|
@ -109,14 +113,14 @@ def parse_json_export(json_file):
|
||||||
if erg.get('description'):
|
if erg.get('description'):
|
||||||
title = (erg.get('description') or '').replace(' — Readability', '')
|
title = (erg.get('description') or '').replace(' — Readability', '')
|
||||||
else:
|
else:
|
||||||
title = erg['title']
|
title = erg['title'].strip()
|
||||||
info = {
|
info = {
|
||||||
'url': url,
|
'url': url,
|
||||||
'domain': domain(url),
|
'domain': domain(url),
|
||||||
'base_url': base_url(url),
|
'base_url': base_url(url),
|
||||||
'timestamp': timestamp,
|
'timestamp': timestamp,
|
||||||
'tags': erg.get('tags') or '',
|
'tags': erg.get('tags') or '',
|
||||||
'title': title,
|
'title': title or fetch_page_title(url),
|
||||||
'sources': [json_file.name],
|
'sources': [json_file.name],
|
||||||
}
|
}
|
||||||
info['type'] = get_link_type(info)
|
info['type'] = get_link_type(info)
|
||||||
|
@ -144,7 +148,7 @@ def parse_rss_export(rss_file):
|
||||||
def get_row(key):
|
def get_row(key):
|
||||||
return [r for r in rows if r.startswith('<{}>'.format(key))][0]
|
return [r for r in rows if r.startswith('<{}>'.format(key))][0]
|
||||||
|
|
||||||
title = str_between(get_row('title'), '<![CDATA[', ']]')
|
title = str_between(get_row('title'), '<![CDATA[', ']]').strip()
|
||||||
url = str_between(get_row('link'), '<link>', '</link>')
|
url = str_between(get_row('link'), '<link>', '</link>')
|
||||||
ts_str = str_between(get_row('pubDate'), '<pubDate>', '</pubDate>')
|
ts_str = str_between(get_row('pubDate'), '<pubDate>', '</pubDate>')
|
||||||
time = datetime.strptime(ts_str, "%a, %d %b %Y %H:%M:%S %z")
|
time = datetime.strptime(ts_str, "%a, %d %b %Y %H:%M:%S %z")
|
||||||
|
@ -155,7 +159,7 @@ def parse_rss_export(rss_file):
|
||||||
'base_url': base_url(url),
|
'base_url': base_url(url),
|
||||||
'timestamp': str(time.timestamp()),
|
'timestamp': str(time.timestamp()),
|
||||||
'tags': '',
|
'tags': '',
|
||||||
'title': title,
|
'title': title or fetch_page_title(url),
|
||||||
'sources': [rss_file.name],
|
'sources': [rss_file.name],
|
||||||
}
|
}
|
||||||
info['type'] = get_link_type(info)
|
info['type'] = get_link_type(info)
|
||||||
|
@ -182,7 +186,7 @@ def parse_bookmarks_export(html_file):
|
||||||
'base_url': base_url(url),
|
'base_url': base_url(url),
|
||||||
'timestamp': str(time.timestamp()),
|
'timestamp': str(time.timestamp()),
|
||||||
'tags': "",
|
'tags': "",
|
||||||
'title': match.group(3),
|
'title': match.group(3).strip() or fetch_page_title(url),
|
||||||
'sources': [html_file.name],
|
'sources': [html_file.name],
|
||||||
}
|
}
|
||||||
info['type'] = get_link_type(info)
|
info['type'] = get_link_type(info)
|
||||||
|
@ -198,7 +202,7 @@ def parse_pinboard_rss_feed(rss_file):
|
||||||
for item in items:
|
for item in items:
|
||||||
url = item.find("{http://purl.org/rss/1.0/}link").text
|
url = item.find("{http://purl.org/rss/1.0/}link").text
|
||||||
tags = item.find("{http://purl.org/dc/elements/1.1/}subject").text
|
tags = item.find("{http://purl.org/dc/elements/1.1/}subject").text
|
||||||
title = item.find("{http://purl.org/rss/1.0/}title").text
|
title = item.find("{http://purl.org/rss/1.0/}title").text.strip()
|
||||||
ts_str = item.find("{http://purl.org/dc/elements/1.1/}date").text
|
ts_str = item.find("{http://purl.org/dc/elements/1.1/}date").text
|
||||||
# = 🌈🌈🌈🌈
|
# = 🌈🌈🌈🌈
|
||||||
# = 🌈🌈🌈🌈
|
# = 🌈🌈🌈🌈
|
||||||
|
@ -215,7 +219,7 @@ def parse_pinboard_rss_feed(rss_file):
|
||||||
'base_url': base_url(url),
|
'base_url': base_url(url),
|
||||||
'timestamp': str(time.timestamp()),
|
'timestamp': str(time.timestamp()),
|
||||||
'tags': tags,
|
'tags': tags,
|
||||||
'title': title,
|
'title': title or fetch_page_title(url),
|
||||||
'sources': [rss_file.name],
|
'sources': [rss_file.name],
|
||||||
}
|
}
|
||||||
info['type'] = get_link_type(info)
|
info['type'] = get_link_type(info)
|
||||||
|
@ -231,7 +235,7 @@ def parse_medium_rss_feed(rss_file):
|
||||||
# for child in item:
|
# for child in item:
|
||||||
# print(child.tag, child.text)
|
# print(child.tag, child.text)
|
||||||
url = item.find("link").text
|
url = item.find("link").text
|
||||||
title = item.find("title").text
|
title = item.find("title").text.strip()
|
||||||
ts_str = item.find("pubDate").text
|
ts_str = item.find("pubDate").text
|
||||||
time = datetime.strptime(ts_str, "%a, %d %b %Y %H:%M:%S %Z")
|
time = datetime.strptime(ts_str, "%a, %d %b %Y %H:%M:%S %Z")
|
||||||
info = {
|
info = {
|
||||||
|
@ -239,9 +243,35 @@ def parse_medium_rss_feed(rss_file):
|
||||||
'domain': domain(url),
|
'domain': domain(url),
|
||||||
'base_url': base_url(url),
|
'base_url': base_url(url),
|
||||||
'timestamp': str(time.timestamp()),
|
'timestamp': str(time.timestamp()),
|
||||||
'tags': "",
|
'tags': '',
|
||||||
'title': title,
|
'title': title or fetch_page_title(url),
|
||||||
'sources': [rss_file.name],
|
'sources': [rss_file.name],
|
||||||
}
|
}
|
||||||
info['type'] = get_link_type(info)
|
info['type'] = get_link_type(info)
|
||||||
yield info
|
yield info
|
||||||
|
|
||||||
|
|
||||||
|
def parse_plain_text(text_file):
    """Parse raw links from each line in a plain text file.

    Scans every line of the (already-open) file handle for URLs matching
    URL_REGEX and yields one link-info dict per URL found, in the same
    schema as the other parse_* functions in this module.

    Args:
        text_file: an open file handle (must support .seek and .name).

    Yields:
        dict with url/domain/base_url/timestamp/tags/title/sources/type keys.
    """
    # rewind in case an earlier parser attempt already consumed the handle
    text_file.seek(0)

    # iterate the handle lazily instead of materializing the whole file
    # with readlines() -- identical output, lower peak memory on big files
    for line in text_file:
        if not line:
            continue

        for url in re.findall(URL_REGEX, line):
            # NOTE(review): titles are fetched eagerly over the network here,
            # which can make importing a long list of links slow
            info = {
                'url': url,
                'domain': domain(url),
                'base_url': base_url(url),
                'timestamp': str(datetime.now().timestamp()),
                'tags': '',
                'title': fetch_page_title(url),
                'sources': [text_file.name],
            }
            info['type'] = get_link_type(info)
            yield info
|
||||||
|
|
||||||
|
|
|
@ -42,6 +42,8 @@ base_url = lambda url: without_scheme(url) # uniq base url used to dedupe links
|
||||||
|
|
||||||
short_ts = lambda ts: ts.split('.')[0]
|
short_ts = lambda ts: ts.split('.')[0]
|
||||||
|
|
||||||
|
URL_REGEX = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
|
||||||
|
|
||||||
|
|
||||||
def check_dependencies():
|
def check_dependencies():
|
||||||
"""Check that all necessary dependencies are installed, and have valid versions"""
|
"""Check that all necessary dependencies are installed, and have valid versions"""
|
||||||
|
@ -208,6 +210,19 @@ def download_url(url):
|
||||||
|
|
||||||
return source_path
|
return source_path
|
||||||
|
|
||||||
|
|
||||||
|
def fetch_page_title(url, default=None):
    """Attempt to guess a page's title by downloading its html.

    Best-effort: any failure (network error, timeout, bad encoding,
    missing <title> tag) falls back to `default` rather than raising.

    Args:
        url: the page to fetch.
        default: value returned when no title can be determined.

    Returns:
        The text between <title>...</title>, or `default`.
    """
    try:
        # timeout prevents one slow or hanging server from stalling the
        # whole link import (urlopen blocks indefinitely by default)
        html_content = urllib.request.urlopen(url, timeout=10).read().decode('utf-8')

        # IGNORECASE: <TITLE> is valid html; allow attributes on the tag;
        # DOTALL: titles may span multiple lines
        match = re.search('<title.*?>(.*?)</title>', html_content, re.IGNORECASE | re.DOTALL)
        return match.group(1) if match else default
    except Exception:
        # deliberately broad: fetching a title is optional polish and must
        # never abort an import
        return default
|
||||||
|
|
||||||
|
|
||||||
def str_between(string, start, end=None):
|
def str_between(string, start, end=None):
|
||||||
"""(<abc>12345</def>, <abc>, </def>) -> 12345"""
|
"""(<abc>12345</def>, <abc>, </def>) -> 12345"""
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue