replace uses of URL_REGEX with find_all_urls to handle markdown better

This commit is contained in:
Nick Sweeting 2024-04-24 17:45:45 -07:00
parent 3afdd3d96f
commit beb3932d80
No known key found for this signature in database
4 changed files with 60 additions and 71 deletions

View file

@@ -11,7 +11,7 @@ from ..index.schema import Link
from ..util import (
htmldecode,
enforce_types,
URL_REGEX
find_all_urls,
)
@@ -39,7 +39,7 @@ def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[Link]:
pass
# otherwise look for anything that looks like a URL in the line
for url in re.findall(URL_REGEX, line):
for url in find_all_urls(line):
yield Link(
url=htmldecode(url),
timestamp=str(datetime.now(timezone.utc).timestamp()),
@@ -48,17 +48,6 @@ def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[Link]:
sources=[text_file.name],
)
# look inside the URL for any sub-urls, e.g. for archive.org links
# https://web.archive.org/web/20200531203453/https://www.reddit.com/r/socialism/comments/gu24ke/nypd_officers_claim_they_are_protecting_the_rule/fsfq0sw/
# -> https://www.reddit.com/r/socialism/comments/gu24ke/nypd_officers_claim_they_are_protecting_the_rule/fsfq0sw/
for sub_url in re.findall(URL_REGEX, line[1:]):
yield Link(
url=htmldecode(sub_url),
timestamp=str(datetime.now(timezone.utc).timestamp()),
title=None,
tags=None,
sources=[text_file.name],
)
KEY = 'txt'
NAME = 'Generic TXT'