Mirror of https://github.com/ArchiveBox/ArchiveBox.git, synced 2025-05-13 14:44:29 -04:00
improve title extractor

commit de8e22efb7 (parent bf432d4931)
3 changed files with 26 additions and 25 deletions
archivebox/extractors/__init__.py

@@ -42,7 +42,6 @@ from .headers import should_save_headers, save_headers
 
 def get_default_archive_methods():
     return [
-        ('title', should_save_title, save_title),
         ('favicon', should_save_favicon, save_favicon),
         ('headers', should_save_headers, save_headers),
         ('singlefile', should_save_singlefile, save_singlefile),
@@ -50,7 +49,8 @@ def get_default_archive_methods():
         ('screenshot', should_save_screenshot, save_screenshot),
         ('dom', should_save_dom, save_dom),
         ('wget', should_save_wget, save_wget),
-        ('readability', should_save_readability, save_readability), # keep readability below wget and singlefile, as it depends on them
+        ('title', should_save_title, save_title), # keep title and readability below wget and singlefile, as it depends on them
+        ('readability', should_save_readability, save_readability),
         ('mercury', should_save_mercury, save_mercury),
         ('git', should_save_git, save_git),
         ('media', should_save_media, save_media),
@@ -182,7 +182,7 @@ def archive_links(all_links: Union[Iterable[Link], QuerySet], overwrite: bool=Fa
     except KeyboardInterrupt:
         log_archiving_paused(num_links, idx, link.timestamp)
         raise SystemExit(0)
-    except BaseException: # lgtm [py/catch-base-exception]
+    except BaseException:
         print()
         raise
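Note on the reordering above: get_default_archive_methods() returns the extractors in the order they run, so moving 'title' below 'wget' and 'singlefile' guarantees those output files already exist on disk by the time the title extractor goes looking for them. A minimal sketch of that pattern follows; the stand-in functions are hypothetical illustrations, not ArchiveBox's real API:

# Minimal sketch of an ordered (name, should_save, save) extractor list.
# always() and save_stub() are hypothetical stand-ins, not ArchiveBox code.
from typing import Callable, List, Tuple

Method = Tuple[str, Callable[[str], bool], Callable[[str], str]]

def always(url: str) -> bool:
    return True  # stand-in predicate: archive everything

def save_stub(url: str) -> str:
    return f"saved {url}"  # stand-in saver

METHODS: List[Method] = [
    ("wget", always, save_stub),
    ("singlefile", always, save_stub),
    ("title", always, save_stub),  # after wget/singlefile, so their output exists first
]

def archive(url: str) -> None:
    for name, should_save, save in METHODS:  # methods run strictly in list order
        if should_save(url):
            print(f"{name}: {save(url)}")

archive("https://example.com")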
archivebox/extractors/readability.py

@@ -22,28 +22,8 @@ from ..config import (
     READABILITY_VERSION,
 )
 from ..logging_util import TimedProgress
 
-@enforce_types
-def get_html(link: Link, path: Path) -> str:
-    """
-    Try to find wget, singlefile and then dom files.
-    If none is found, download the url again.
-    """
-    canonical = link.canonical_outputs()
-    abs_path = path.absolute()
-    sources = [canonical["singlefile_path"], canonical["wget_path"], canonical["dom_path"]]
-    document = None
-    for source in sources:
-        try:
-            with open(abs_path / source, "r", encoding="utf-8") as f:
-                document = f.read()
-                break
-        except (FileNotFoundError, TypeError):
-            continue
-    if document is None:
-        return download_url(link.url)
-    else:
-        return document
+from .title import get_html
 
 @enforce_types
 def should_save_readability(link: Link, out_dir: Optional[str]=None, overwrite: Optional[bool]=False) -> bool:
archivebox/extractors/title.py

@@ -58,6 +58,27 @@ class TitleParser(HTMLParser):
         if tag.lower() == "title":
             self.inside_title_tag = False
 
+@enforce_types
+def get_html(link: Link, path: Path, timeout: int=TIMEOUT) -> str:
+    """
+    Try to find wget, singlefile and then dom files.
+    If none is found, download the url again.
+    """
+    canonical = link.canonical_outputs()
+    abs_path = path.absolute()
+    sources = [canonical["singlefile_path"], canonical["wget_path"], canonical["dom_path"]]
+    document = None
+    for source in sources:
+        try:
+            with open(abs_path / source, "r", encoding="utf-8") as f:
+                document = f.read()
+                break
+        except (FileNotFoundError, TypeError):
+            continue
+    if document is None:
+        return download_url(link.url, timeout=timeout)
+    else:
+        return document
 
 @enforce_types
 def should_save_title(link: Link, out_dir: Optional[str]=None, overwrite: Optional[bool]=False) -> bool:

@@ -90,7 +111,7 @@ def save_title(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -
     status = 'succeeded'
     timer = TimedProgress(timeout, prefix=' ')
     try:
-        html = download_url(link.url, timeout=timeout)
+        html = get_html(link, out_dir, timeout=timeout)
         try:
             # try using relatively strict html parser first
             parser = TitleParser()
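Taken together, save_title() now calls the shared get_html() helper instead of download_url() directly, so it reuses any singlefile, wget, or dom output already on disk and only re-downloads the page as a last resort. A standalone sketch of that fallback pattern; fetch() here is a hypothetical stand-in for ArchiveBox's download_url():

# Standalone sketch of get_html's fallback strategy: read the first archive
# file that exists, otherwise fetch the URL over the network.
from pathlib import Path
from typing import List, Optional

def fetch(url: str, timeout: int = 60) -> str:
    # hypothetical stand-in for download_url(); returns dummy HTML
    return "<html><title>fetched</title></html>"

def get_html_fallback(out_dir: Path, sources: List[Optional[str]], url: str, timeout: int = 60) -> str:
    for source in sources:
        try:
            # a None entry raises TypeError, a missing file FileNotFoundError;
            # either way, fall through to the next candidate
            return (out_dir / source).read_text(encoding="utf-8")
        except (FileNotFoundError, TypeError):
            continue
    return fetch(url, timeout=timeout)

# With no archive files on disk, this falls back to the (stand-in) fetch:
print(get_html_fallback(Path("."), ["singlefile.html", None], "https://example.com"))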