Mirror of https://github.com/ArchiveBox/ArchiveBox.git, synced 2025-05-13 22:54:27 -04:00

Commit 05da16db99: Merge branch 'dev'
16 changed files with 147 additions and 35 deletions
archivebox/cli/archivebox_add.py

@@ -10,6 +10,7 @@ from typing import List, Optional, IO
 
 from ..main import add
 from ..util import docstring
+from ..parsers import PARSERS
 from ..config import OUTPUT_DIR, ONLY_NEW
 from ..logging_util import SmartFormatter, accept_stdin, stderr
 

@@ -79,6 +80,13 @@ def main(args: Optional[List[str]]=None, stdin: Optional[IO]=None, pwd: Optional
              This does not take precedence over the configuration",
        default=""
    )
+    parser.add_argument(
+        "--parser",
+        type=str,
+        help="Parser used to read inputted URLs.",
+        default="auto",
+        choices=["auto", *PARSERS.keys()],
+    )
    command = parser.parse_args(args or ())
    urls = command.urls
 

@@ -101,6 +109,7 @@ def main(args: Optional[List[str]]=None, stdin: Optional[IO]=None, pwd: Optional
        overwrite=command.overwrite,
        init=command.init,
        extractors=command.extract,
+        parser=command.parser,
        out_dir=pwd or OUTPUT_DIR,
    )
 
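The three hunks above wire a new --parser flag into `archivebox add`: argparse only accepts "auto" or one of the keys of the PARSERS registry, and the chosen value is forwarded to add() unchanged. A minimal standalone sketch of how that flag behaves (with a stub dict standing in for the real archivebox.parsers.PARSERS):

import argparse

# stub registry; the real keys come from archivebox.parsers.PARSERS
PARSERS = {'rss': None, 'json': None, 'txt': None, 'url_list': None}

cli = argparse.ArgumentParser(prog='archivebox add')
cli.add_argument(
    "--parser",
    type=str,
    help="Parser used to read inputted URLs.",
    default="auto",
    choices=["auto", *PARSERS.keys()],  # any other value is rejected at parse time
)

print(cli.parse_args([]).parser)                        # -> 'auto'
print(cli.parse_args(["--parser", "url_list"]).parser)  # -> 'url_list'
# cli.parse_args(["--parser", "bogus"]) exits with "invalid choice: 'bogus'"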
archivebox/index/__init__.py

@@ -265,14 +265,14 @@ def load_main_index_meta(out_dir: Path=OUTPUT_DIR) -> Optional[dict]:
 
 
 @enforce_types
-def parse_links_from_source(source_path: str, root_url: Optional[str]=None) -> Tuple[List[Link], List[Link]]:
+def parse_links_from_source(source_path: str, root_url: Optional[str]=None, parser: str="auto") -> Tuple[List[Link], List[Link]]:
 
     from ..parsers import parse_links
 
     new_links: List[Link] = []
 
     # parse and validate the import file
-    raw_links, parser_name = parse_links(source_path, root_url=root_url)
+    raw_links, parser_name = parse_links(source_path, root_url=root_url, parser=parser)
     new_links = validate_links(raw_links)
 
     if parser_name:
archivebox/main.py

@@ -568,6 +568,7 @@ def add(urls: Union[str, List[str]],
        overwrite: bool=False,
        init: bool=False,
        extractors: str="",
+        parser: str="auto",
        out_dir: Path=OUTPUT_DIR) -> List[Link]:
    """Add a new URL or list of URLs to your archive"""
 

@@ -594,7 +595,7 @@ def add(urls: Union[str, List[str]],
    # save verbatim args to sources
    write_ahead_log = save_text_as_source('\n'.join(urls), filename='{ts}-import.txt', out_dir=out_dir)
 
-    new_links += parse_links_from_source(write_ahead_log, root_url=None)
+    new_links += parse_links_from_source(write_ahead_log, root_url=None, parser=parser)
 
    # If we're going one level deeper, download each link and look for more links
    new_links_depth = []
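With add() accepting the new keyword, the same parser choice is available from the Python API, not just the CLI. A hedged usage sketch (assumes it is run inside an already-initialized ArchiveBox data directory):

from archivebox.main import add

# force the explicit URL-list parser instead of letting each parser take a guess
add('https://example.com\nhttps://example.org', parser='url_list')

# omitting the argument keeps the old behavior: parser="auto" tries each parser in turn
add('https://example.com')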
archivebox/parsers/__init__.py

@@ -31,36 +31,42 @@ from ..util import (
 from ..index.schema import Link
 from ..logging_util import TimedProgress, log_source_saved
 
-from .pocket_html import parse_pocket_html_export
-from .pocket_api import parse_pocket_api_export
-from .pinboard_rss import parse_pinboard_rss_export
-from .wallabag_atom import parse_wallabag_atom_export
-from .shaarli_rss import parse_shaarli_rss_export
-from .medium_rss import parse_medium_rss_export
-from .netscape_html import parse_netscape_html_export
-from .generic_rss import parse_generic_rss_export
-from .generic_json import parse_generic_json_export
-from .generic_html import parse_generic_html_export
-from .generic_txt import parse_generic_txt_export
+from . import pocket_api
+from . import wallabag_atom
+from . import pocket_html
+from . import pinboard_rss
+from . import shaarli_rss
+from . import medium_rss
+from . import netscape_html
+from . import generic_rss
+from . import generic_json
+from . import generic_html
+from . import generic_txt
+from . import url_list
 
 
-PARSERS = (
+PARSERS = {
     # Specialized parsers
-    ('Pocket API', parse_pocket_api_export),
-    ('Wallabag ATOM', parse_wallabag_atom_export),
-    ('Pocket HTML', parse_pocket_html_export),
-    ('Pinboard RSS', parse_pinboard_rss_export),
-    ('Shaarli RSS', parse_shaarli_rss_export),
-    ('Medium RSS', parse_medium_rss_export),
+    pocket_api.KEY: (pocket_api.NAME, pocket_api.PARSER),
+    wallabag_atom.KEY: (wallabag_atom.NAME, wallabag_atom.PARSER),
+    pocket_html.KEY: (pocket_html.NAME, pocket_html.PARSER),
+    pinboard_rss.KEY: (pinboard_rss.NAME, pinboard_rss.PARSER),
+    shaarli_rss.KEY: (shaarli_rss.NAME, shaarli_rss.PARSER),
+    medium_rss.KEY: (medium_rss.NAME, medium_rss.PARSER),
 
     # General parsers
-    ('Netscape HTML', parse_netscape_html_export),
-    ('Generic RSS', parse_generic_rss_export),
-    ('Generic JSON', parse_generic_json_export),
-    ('Generic HTML', parse_generic_html_export),
+    netscape_html.KEY: (netscape_html.NAME, netscape_html.PARSER),
+    generic_rss.KEY: (generic_rss.NAME, generic_rss.PARSER),
+    generic_json.KEY: (generic_json.NAME, generic_json.PARSER),
+    generic_html.KEY: (generic_html.NAME, generic_html.PARSER),
 
-    # Fallback parser
-    ('Plain Text', parse_generic_txt_export),
-)
+    # Catchall fallback parser
+    generic_txt.KEY: (generic_txt.NAME, generic_txt.PARSER),
+
+    # Explicitly specified parsers
+    url_list.KEY: (url_list.NAME, url_list.PARSER),
+}
 
 
 @enforce_types

@@ -83,14 +89,14 @@ def parse_links_memory(urls: List[str], root_url: Optional[str]=None):
 
 
 @enforce_types
-def parse_links(source_file: str, root_url: Optional[str]=None) -> Tuple[List[Link], str]:
+def parse_links(source_file: str, root_url: Optional[str]=None, parser: str="auto") -> Tuple[List[Link], str]:
     """parse a list of URLs with their metadata from an
     RSS feed, bookmarks export, or text file
     """
 
     timer = TimedProgress(TIMEOUT * 4)
     with open(source_file, 'r', encoding='utf-8') as file:
-        links, parser = run_parser_functions(file, timer, root_url=root_url)
+        links, parser = run_parser_functions(file, timer, root_url=root_url, parser=parser)
 
     timer.end()
     if parser is None:

@@ -98,11 +104,20 @@ def parse_links(source_file: str, root_url: Optional[str]=None) -> Tuple[List[Li
     return links, parser
 
 
-def run_parser_functions(to_parse: IO[str], timer, root_url: Optional[str]=None) -> Tuple[List[Link], Optional[str]]:
+def run_parser_functions(to_parse: IO[str], timer, root_url: Optional[str]=None, parser: str="auto") -> Tuple[List[Link], Optional[str]]:
     most_links: List[Link] = []
     best_parser_name = None
 
-    for parser_name, parser_func in PARSERS:
+    if parser != "auto":
+        parser_name, parser_func = PARSERS[parser]
+        parsed_links = list(parser_func(to_parse, root_url=root_url))
+        if not parsed_links:
+            raise Exception('no links found')
+        timer.end()
+        return parsed_links, parser_name
+
+    for parser_id in PARSERS:
+        parser_name, parser_func = PARSERS[parser_id]
         try:
             parsed_links = list(parser_func(to_parse, root_url=root_url))
             if not parsed_links:
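The registry's move from a tuple of (name, function) pairs to a dict of KEY: (NAME, PARSER) entries is what makes lookup-by-flag possible, and because dicts preserve insertion order in Python 3.7+, the "auto" path still visits parsers in the same most-specific-first order as the old tuple. A condensed, self-contained sketch of the two branches in run_parser_functions (stub parsers stand in for the real modules, and for brevity the first successful parser wins, where the real "auto" loop also tracks which parser found the most links via most_links/best_parser_name):

from io import StringIO
from typing import Callable, Dict, List, Tuple

def parse_stub_rss(to_parse, **_kwargs) -> List[str]:
    return []  # pretend nothing in the input looked like RSS

def parse_stub_txt(to_parse, **_kwargs) -> List[str]:
    return [line.strip() for line in to_parse if line.strip()]

PARSERS: Dict[str, Tuple[str, Callable]] = {
    'rss': ('Generic RSS', parse_stub_rss),   # tried first under "auto"
    'txt': ('Generic TXT', parse_stub_txt),   # fallback
}

def pick_parser(to_parse, parser: str = "auto"):
    if parser != "auto":
        # explicit choice: exactly one attempt, no fallback, KeyError on unknown keys
        parser_name, parser_func = PARSERS[parser]
        parsed_links = list(parser_func(to_parse))
        if not parsed_links:
            raise Exception('no links found')
        return parsed_links, parser_name

    # "auto": try every registered parser in declaration order
    for parser_id in PARSERS:
        parser_name, parser_func = PARSERS[parser_id]
        try:
            parsed_links = list(parser_func(to_parse))
            if parsed_links:
                return parsed_links, parser_name
        except Exception:
            continue
    return [], None

print(pick_parser(StringIO("https://example.com\n")))         # falls through rss to txt
print(pick_parser(StringIO("https://example.com\n"), 'txt'))  # direct lookup, no guessing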
archivebox/parsers/generic_html.py

@@ -51,3 +51,8 @@ def parse_generic_html_export(html_file: IO[str], root_url: Optional[str]=None,
             tags=None,
             sources=[html_file.name],
         )
+
+
+KEY = 'html'
+NAME = 'Generic HTML'
+PARSER = parse_generic_html_export
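Every parser module in this commit gets the same three-constant footer, and those constants are the entire plug-in contract: KEY is the registry dict key (and the value passed to --parser), NAME is the human-readable label, and PARSER is the parse function. A hypothetical new parser would only need that footer plus one registration line in PARSERS; the sketch below is illustrative only (hypothetical_csv.py is not part of this commit):

# archivebox/parsers/hypothetical_csv.py  (hypothetical module, for illustration)
from typing import IO, Iterable
from datetime import datetime

from ..index.schema import Link
from ..util import enforce_types


@enforce_types
def parse_csv_export(csv_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse URLs out of the first column of a CSV file"""
    csv_file.seek(0)
    for line in csv_file.readlines():
        url = line.split(',')[0].strip()
        if url.startswith('http'):
            yield Link(
                url=url,
                timestamp=str(datetime.now().timestamp()),
                title=None,
                tags=None,
                sources=[csv_file.name],
            )


KEY = 'csv'
NAME = 'Generic CSV'
PARSER = parse_csv_export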
archivebox/parsers/generic_json.py

@@ -63,3 +63,8 @@ def parse_generic_json_export(json_file: IO[str], **_kwargs) -> Iterable[Link]:
             tags=htmldecode(link.get('tags')) or '',
             sources=[json_file.name],
         )
+
+
+KEY = 'json'
+NAME = 'Generic JSON'
+PARSER = parse_generic_json_export
archivebox/parsers/generic_rss.py

@@ -47,3 +47,8 @@ def parse_generic_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
             tags=None,
             sources=[rss_file.name],
         )
+
+
+KEY = 'rss'
+NAME = 'Generic RSS'
+PARSER = parse_generic_rss_export
archivebox/parsers/generic_txt.py

@@ -17,7 +17,7 @@ from ..util import (
 
 @enforce_types
 def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[Link]:
-    """Parse raw links from each line in a text file"""
+    """Parse links from a text file, ignoring other text"""
 
     text_file.seek(0)
     for line in text_file.readlines():

@@ -59,3 +59,7 @@ def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[Link]:
             tags=None,
             sources=[text_file.name],
         )
+
+KEY = 'txt'
+NAME = 'Generic TXT'
+PARSER = parse_generic_txt_export
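The reworded docstring reflects the split in responsibilities created by this commit: generic_txt hunts for URLs embedded anywhere in a line of text, while the new url_list parser (added below) treats every non-empty line as a URL verbatim. A rough illustration of the difference, using a simple regex as a stand-in for generic_txt's real extraction logic:

import re

text = "see https://example.com for details\nhttps://example.org\n"

# generic_txt-style: extract URLs out of surrounding prose, ignoring other text
print(re.findall(r'https?://\S+', text))
# -> ['https://example.com', 'https://example.org']

# url_list-style: each non-empty line IS the URL, no searching
print([line.strip() for line in text.splitlines() if line.strip()])
# -> ['see https://example.com for details', 'https://example.org']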
archivebox/parsers/medium_rss.py

@@ -33,3 +33,8 @@ def parse_medium_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
             tags=None,
             sources=[rss_file.name],
         )
+
+
+KEY = 'medium_rss'
+NAME = 'Medium RSS'
+PARSER = parse_medium_rss_export
archivebox/parsers/netscape_html.py

@@ -37,3 +37,7 @@ def parse_netscape_html_export(html_file: IO[str], **_kwargs) -> Iterable[Link]:
             sources=[html_file.name],
         )
 
+
+KEY = 'netscape_html'
+NAME = 'Netscape HTML'
+PARSER = parse_netscape_html_export
archivebox/parsers/pinboard_rss.py

@@ -45,3 +45,8 @@ def parse_pinboard_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
             tags=htmldecode(tags) or None,
             sources=[rss_file.name],
         )
+
+
+KEY = 'pinboard_rss'
+NAME = 'Pinboard RSS'
+PARSER = parse_pinboard_rss_export
archivebox/parsers/pocket_api.py

@@ -111,3 +111,8 @@ def parse_pocket_api_export(input_buffer: IO[str], **_kwargs) -> Iterable[Link]:
            yield link_from_article(article, sources=[line])
 
        write_since(username, api.last_since)
+
+
+KEY = 'pocket_api'
+NAME = 'Pocket API'
+PARSER = parse_pocket_api_export
archivebox/parsers/pocket_html.py

@@ -36,3 +36,8 @@ def parse_pocket_html_export(html_file: IO[str], **_kwargs) -> Iterable[Link]:
             tags=tags or '',
             sources=[html_file.name],
         )
+
+
+KEY = 'pocket_html'
+NAME = 'Pocket HTML'
+PARSER = parse_pocket_html_export
archivebox/parsers/shaarli_rss.py

@@ -48,3 +48,8 @@ def parse_shaarli_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
             tags=None,
             sources=[rss_file.name],
         )
+
+
+KEY = 'shaarli_rss'
+NAME = 'Shaarli RSS'
+PARSER = parse_shaarli_rss_export
archivebox/parsers/url_list.py (new file, 34 lines)

@@ -0,0 +1,34 @@
+__package__ = 'archivebox.parsers'
+__description__ = 'URL list'
+
+from typing import IO, Iterable
+from datetime import datetime
+
+from ..index.schema import Link
+from ..util import (
+    enforce_types
+)
+
+
+@enforce_types
+def parse_url_list(text_file: IO[str], **_kwargs) -> Iterable[Link]:
+    """Parse raw URLs from each line in a text file"""
+
+    text_file.seek(0)
+    for line in text_file.readlines():
+        url = line.strip()
+        if not url:
+            continue
+
+        yield Link(
+            url=url,
+            timestamp=str(datetime.now().timestamp()),
+            title=None,
+            tags=None,
+            sources=[text_file.name],
+        )
+
+
+KEY = 'url_list'
+NAME = 'URL List'
+PARSER = parse_url_list
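Because parse_url_list records text_file.name in each Link's sources, a quick sanity check needs a real file on disk rather than an io.StringIO (which has no .name attribute). A minimal sketch, assuming the archivebox package is importable:

import tempfile

from archivebox.parsers.url_list import parse_url_list

with tempfile.NamedTemporaryFile('w+', suffix='.txt') as f:
    f.write('https://example.com\n\nhttps://example.org\n')
    f.seek(0)
    links = list(parse_url_list(f))

print([link.url for link in links])
# -> ['https://example.com', 'https://example.org']  (the blank line is skipped)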
archivebox/parsers/wallabag_atom.py

@@ -55,3 +55,8 @@ def parse_wallabag_atom_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
             tags=tags or '',
             sources=[rss_file.name],
         )
+
+
+KEY = 'wallabag_atom'
+NAME = 'Wallabag Atom'
+PARSER = parse_wallabag_atom_export