Mirror of https://github.com/ArchiveBox/ArchiveBox.git, synced 2025-05-23 11:17:02 -04:00
new generic_html parser for extracting hrefs

This commit is contained in:
parent: a682a9c478
commit: 15efb2d5ed

5 changed files with 106 additions and 39 deletions
archivebox/parsers/__init__.py

@@ -11,7 +11,7 @@ import re
 import os
 from io import StringIO
 
-from typing import IO, Tuple, List
+from typing import IO, Tuple, List, Optional
 from datetime import datetime
 
 from ..system import atomic_write
@@ -38,26 +38,29 @@ from .medium_rss import parse_medium_rss_export
 from .netscape_html import parse_netscape_html_export
 from .generic_rss import parse_generic_rss_export
 from .generic_json import parse_generic_json_export
+from .generic_html import parse_generic_html_export
 from .generic_txt import parse_generic_txt_export
 
 PARSERS = (
     # Specialized parsers
     ('Pocket HTML', parse_pocket_html_export),
     ('Pinboard RSS', parse_pinboard_rss_export),
     ('Shaarli RSS', parse_shaarli_rss_export),
     ('Medium RSS', parse_medium_rss_export),
 
     # General parsers
     ('Netscape HTML', parse_netscape_html_export),
     ('Generic RSS', parse_generic_rss_export),
     ('Generic JSON', parse_generic_json_export),
+    ('Generic HTML', parse_generic_html_export),
 
     # Fallback parser
     ('Plain Text', parse_generic_txt_export),
 )
 
 @enforce_types
-def parse_links_memory(urls: List[str]):
+def parse_links_memory(urls: List[str], root_url: Optional[str]=None):
     """
     parse a list of URLS without touching the filesystem
     """
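The generic_html module registered above is added in one of this commit's 5 changed files and is not shown in this excerpt. As a rough illustration only, a minimal href-extracting parser consistent with the parser_func(to_parse, root_url=root_url) call introduced below might look like this (plain dicts stand in for ArchiveBox's Link objects to keep the sketch self-contained; this is not the commit's exact code):

from datetime import datetime
from html.parser import HTMLParser
from typing import IO, Iterator, Optional
from urllib.parse import urljoin

class HrefParser(HTMLParser):
    # collect the href attribute of every <a> tag encountered
    def __init__(self) -> None:
        super().__init__()
        self.urls = []

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for attr, value in attrs:
                if attr == 'href' and value:
                    self.urls.append(value)

def parse_generic_html_export(html_file: IO[str], root_url: Optional[str]=None) -> Iterator[dict]:
    # feed the whole document through the parser, then emit one entry per href
    parser = HrefParser()
    parser.feed(html_file.read())
    for url in parser.urls:
        if root_url:
            url = urljoin(root_url, url)  # resolve relative hrefs against the page URL
        yield {
            'url': url,
            'timestamp': str(datetime.now().timestamp()),
            'title': None,
            'sources': [getattr(html_file, 'name', 'io_string')],
        }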
@@ -68,17 +71,16 @@ def parse_links_memory(urls: List[str]):
     file = StringIO()
     file.writelines(urls)
     file.name = "io_string"
-    output = _parse(file, timer)
-
-    if output is not None:
-        return output
-
+    links, parser = run_parser_functions(file, timer, root_url=root_url)
     timer.end()
-    return [], 'Failed to parse'
+
+    if parser is None:
+        return [], 'Failed to parse'
+    return links, parser
 
 
 @enforce_types
-def parse_links(source_file: str) -> Tuple[List[Link], str]:
+def parse_links(source_file: str, root_url: Optional[str]=None) -> Tuple[List[Link], str]:
     """parse a list of URLs with their metadata from an
     RSS feed, bookmarks export, or text file
     """
@@ -87,28 +89,39 @@ def parse_links(source_file: str) -> Tuple[List[Link], str]:
 
     timer = TimedProgress(TIMEOUT * 4)
     with open(source_file, 'r', encoding='utf-8') as file:
-        output = _parse(file, timer)
-
-    if output is not None:
-        return output
+        links, parser = run_parser_functions(file, timer, root_url=root_url)
 
     timer.end()
-    return [], 'Failed to parse'
+    if parser is None:
+        return [], 'Failed to parse'
+    return links, parser
 
 
-def _parse(to_parse: IO[str], timer) -> Tuple[List[Link], str]:
+def run_parser_functions(to_parse: IO[str], timer, root_url: Optional[str]=None) -> Tuple[List[Link], Optional[str]]:
+    most_links: List[Link] = []
+    best_parser_name = None
+
     for parser_name, parser_func in PARSERS:
         try:
-            links = list(parser_func(to_parse))
-            if links:
-                timer.end()
-                return links, parser_name
-        except Exception as err:   # noqa
-            pass
+            parsed_links = list(parser_func(to_parse, root_url=root_url))
+            if not parsed_links:
+                raise Exception('no links found')
+
+            # print(f'[√] Parser {parser_name} succeeded: {len(parsed_links)} links parsed')
+            if len(parsed_links) > len(most_links):
+                most_links = parsed_links
+                best_parser_name = parser_name
+
+        except Exception as err:   # noqa
+            # Parsers are tried one by one down the list, and the first one
+            # that succeeds is used. To see why a certain parser was not used
+            # due to error or format incompatibility, uncomment this line:
+
+            # print('[!] Parser {} failed: {} {}'.format(parser_name, err.__class__.__name__, err))
+            # raise
+            pass
+    timer.end()
+    return most_links, best_parser_name
 
 
 @enforce_types
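Net effect of the last hunk: the old _parse returned as soon as the first parser produced any links, while run_parser_functions now tries every parser and keeps the result of whichever yields the most links. A hypothetical end-to-end call, assuming an ArchiveBox checkout on the import path (the file name and URL are illustrative):

from archivebox.parsers import parse_links

# bookmarks.html is a hypothetical local export file; root_url lets the
# new Generic HTML parser resolve relative hrefs found in it
links, parser_name = parse_links('bookmarks.html', root_url='https://example.com')
print(f'parsed {len(links)} links using the {parser_name} parser')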