refactor: Initial and dirty refactor to replace Link with Snapshot. Barely functional add command

Cristian 2020-12-23 14:51:42 -05:00
parent 8e2270e21b
commit 8c4ae73d65
13 changed files with 246 additions and 233 deletions


@@ -14,6 +14,8 @@ from typing import IO, Tuple, List, Optional
 from datetime import datetime
 from pathlib import Path
 
+from django.db.models import Model
+
 from ..system import atomic_write
 from ..config import (
     ANSI,
@@ -84,7 +86,7 @@ def parse_links_memory(urls: List[str], root_url: Optional[str]=None):
 
 
 @enforce_types
-def parse_links(source_file: str, root_url: Optional[str]=None) -> Tuple[List[Link], str]:
+def parse_snapshots(source_file: str, root_url: Optional[str]=None) -> Tuple[List[Model], str]:
     """parse a list of URLs with their metadata from an
     RSS feed, bookmarks export, or text file
     """
@@ -93,27 +95,27 @@ def parse_links(source_file: str, root_url: Optional[str]=None) -> Tuple[List[Li
     timer = TimedProgress(TIMEOUT * 4)
     with open(source_file, 'r', encoding='utf-8') as file:
-        links, parser = run_parser_functions(file, timer, root_url=root_url)
+        snapshots, parser = run_parser_functions(file, timer, root_url=root_url)
         timer.end()
 
         if parser is None:
             return [], 'Failed to parse'
 
-    return links, parser
+    return snapshots, parser
 
 
-def run_parser_functions(to_parse: IO[str], timer, root_url: Optional[str]=None) -> Tuple[List[Link], Optional[str]]:
-    most_links: List[Link] = []
+def run_parser_functions(to_parse: IO[str], timer, root_url: Optional[str]=None) -> Tuple[List[Model], Optional[str]]:
+    most_snapshots: List[Model] = []
     best_parser_name = None
 
     for parser_name, parser_func in PARSERS:
         try:
-            parsed_links = list(parser_func(to_parse, root_url=root_url))
-            if not parsed_links:
+            parsed_snapshots = list(parser_func(to_parse, root_url=root_url))
+            if not parsed_snapshots:
                 raise Exception('no links found')
 
             # print(f'[√] Parser {parser_name} succeeded: {len(parsed_links)} links parsed')
-            if len(parsed_links) > len(most_links):
-                most_links = parsed_links
+            if len(parsed_snapshots) > len(most_snapshots):
+                most_snapshots = parsed_snapshots
                 best_parser_name = parser_name
 
         except Exception as err:   # noqa
@@ -125,7 +127,7 @@ def run_parser_functions(to_parse: IO[str], timer, root_url: Optional[str]=None)
                 # raise
                 pass
 
     timer.end()
-    return most_links, best_parser_name
+    return most_snapshots, best_parser_name
 
 
 @enforce_types
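
Aside from the Link → Snapshot renames, the control flow of run_parser_functions is unchanged: every registered parser is run against the same input, and the one that extracts the most entries wins. A minimal, self-contained sketch of that selection pattern — the toy parsers and pick_best_parser below are hypothetical stand-ins for ArchiveBox's PARSERS registry and run_parser_functions, not the real code:

import re
from typing import Callable, List, Optional, Tuple

# Hypothetical stand-ins for ArchiveBox's PARSERS registry: each parser
# extracts whatever URLs it recognizes from the raw input text.
def parse_plain_text(text: str) -> List[str]:
    # only accept lines that are nothing but a URL
    return [line.strip() for line in text.splitlines()
            if line.strip().startswith(('http://', 'https://'))]

def parse_html_hrefs(text: str) -> List[str]:
    return re.findall(r'href="(https?://[^"]+)"', text)

PARSERS: List[Tuple[str, Callable[[str], List[str]]]] = [
    ('Plain Text', parse_plain_text),
    ('HTML', parse_html_hrefs),
]

def pick_best_parser(text: str) -> Tuple[List[str], Optional[str]]:
    most_urls: List[str] = []
    best_parser_name = None
    for parser_name, parser_func in PARSERS:
        try:
            parsed = parser_func(text)
            if not parsed:
                raise Exception('no links found')
            # keep whichever parser recovered the most entries
            if len(parsed) > len(most_urls):
                most_urls = parsed
                best_parser_name = parser_name
        except Exception:
            pass
    return most_urls, best_parser_name

urls, parser = pick_best_parser('<li><a href="https://example.com">example</a></li>')
print(parser, urls)   # HTML ['https://example.com']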


@@ -31,6 +31,7 @@ class HrefParser(HTMLParser):
 @enforce_types
 def parse_generic_html_export(html_file: IO[str], root_url: Optional[str]=None, **_kwargs) -> Iterable[Link]:
     """Parse Generic HTML for href tags and use only the url (support for title coming later)"""
+    from core.models import Snapshot
 
     html_file.seek(0)
     for line in html_file:
@@ -44,10 +45,10 @@ def parse_generic_html_export(html_file: IO[str], root_url: Optional[str]=None,
                 url = urljoin(root_url, url)
 
             for archivable_url in re.findall(URL_REGEX, url):
-                yield Link(
+                yield Snapshot(
                     url=htmldecode(archivable_url),
                     timestamp=str(datetime.now().timestamp()),
                     title=None,
-                    tags=None,
-                    sources=[html_file.name],
+                    #tags=None,
+                    #sources=[html_file.name],
                 )
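
Note that the tags and sources kwargs are commented out rather than ported, presumably because the in-progress Snapshot model doesn't define matching fields yet: Django model constructors raise TypeError for keyword arguments that don't correspond to a field, so only url, timestamp, and title survive this first pass. A rough sketch of the shape of the yielded objects — the dataclass below is a hypothetical stand-in for core.models.Snapshot, which isn't shown in this commit:

from dataclasses import dataclass
from datetime import datetime
from typing import Optional

# Hypothetical stand-in for core.models.Snapshot (the real one is a
# Django model): only the fields the refactored parsers still populate.
@dataclass
class Snapshot:
    url: str
    timestamp: str
    title: Optional[str] = None

snap = Snapshot(
    url='https://example.com',
    timestamp=str(datetime.now().timestamp()),
    title=None,
    # tags=... and sources=... stay commented out, mirroring the diff:
    # a model constructor would reject kwargs it has no fields for.
)
print(snap)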


@@ -18,6 +18,8 @@ from ..util import (
 @enforce_types
 def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[Link]:
     """Parse raw links from each line in a text file"""
+    # TODO: Check if we should add sources list to the database
+    from core.models import Snapshot
 
     text_file.seek(0)
     for line in text_file.readlines():
@@ -40,22 +42,22 @@ def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[Link]:
 
         # otherwise look for anything that looks like a URL in the line
         for url in re.findall(URL_REGEX, line):
-            yield Link(
+            yield Snapshot(
                 url=htmldecode(url),
                 timestamp=str(datetime.now().timestamp()),
                 title=None,
-                tags=None,
-                sources=[text_file.name],
+                #tags=None,
+                #sources=[text_file.name],
             )
 
         # look inside the URL for any sub-urls, e.g. for archive.org links
        # https://web.archive.org/web/20200531203453/https://www.reddit.com/r/socialism/comments/gu24ke/nypd_officers_claim_they_are_protecting_the_rule/fsfq0sw/
        # -> https://www.reddit.com/r/socialism/comments/gu24ke/nypd_officers_claim_they_are_protecting_the_rule/fsfq0sw/
         for url in re.findall(URL_REGEX, line[1:]):
-            yield Link(
+            yield Snapshot(
                 url=htmldecode(url),
                 timestamp=str(datetime.now().timestamp()),
                 title=None,
-                tags=None,
-                sources=[text_file.name],
+                #tags=None,
+                #sources=[text_file.name],
             )
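
The second findall pass in this context scans line[1:] rather than line: with a greedy pattern like URL_REGEX, the first pass matches a wayback-style URL as one long string, and dropping the leading character breaks that outer match so the embedded target URL is found on its own. A quick illustration — the pattern below is a simplified stand-in for ArchiveBox's real URL_REGEX:

import re

# Simplified stand-in for ArchiveBox's URL_REGEX (illustration only).
URL_REGEX = r'https?://[^\s<>"]+'

line = ('https://web.archive.org/web/20200531203453/'
        'https://www.reddit.com/r/socialism/comments/gu24ke/fsfq0sw/')

print(re.findall(URL_REGEX, line))      # one greedy match: the full wayback URL
print(re.findall(URL_REGEX, line[1:]))  # the embedded reddit URL on its own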