mirror of
https://github.com/ArchiveBox/ArchiveBox.git
synced 2025-05-13 14:44:29 -04:00
Add log print for url indexing
This commit is contained in:
parent
0acf479b70
commit
db9c2edccc
2 changed files with 9 additions and 2 deletions
|
@ -8,7 +8,7 @@ from archivebox.index.schema import Link
|
||||||
from archivebox.util import enforce_types
|
from archivebox.util import enforce_types
|
||||||
from archivebox.config import setup_django,stderr, OUTPUT_DIR, USE_INDEXING_BACKEND, USE_SEARCHING_BACKEND, SEARCH_BACKEND_ENGINE
|
from archivebox.config import setup_django,stderr, OUTPUT_DIR, USE_INDEXING_BACKEND, USE_SEARCHING_BACKEND, SEARCH_BACKEND_ENGINE
|
||||||
|
|
||||||
from .utils import get_indexable_content
|
from .utils import get_indexable_content, log_index_started
|
||||||
|
|
||||||
def indexing_enabled():
    """Report whether the search-indexing backend is turned on.

    Simply exposes the ``USE_INDEXING_BACKEND`` config flag so callers
    can guard indexing work behind a single readable check.
    """
    return USE_INDEXING_BACKEND
|
@ -98,4 +98,5 @@ def index_links(links: Union[List[Link],None], out_dir: Path=OUTPUT_DIR):
|
||||||
if snap := Snapshot.objects.filter(url=link.url).first():
|
if snap := Snapshot.objects.filter(url=link.url).first():
|
||||||
results = ArchiveResult.objects.indexable().filter(snapshot=snap)
|
results = ArchiveResult.objects.indexable().filter(snapshot=snap)
|
||||||
texts = get_indexable_content(results)
|
texts = get_indexable_content(results)
|
||||||
|
log_index_started(link.url)
|
||||||
write_search_index(link, texts, out_dir=out_dir)
|
write_search_index(link, texts, out_dir=out_dir)
|
||||||
|
|
|
@ -1,6 +1,11 @@
|
||||||
from django.db.models import QuerySet
|
from django.db.models import QuerySet
|
||||||
|
|
||||||
from archivebox.util import enforce_types
|
from archivebox.util import enforce_types
|
||||||
|
from archivebox.config import ANSI
|
||||||
|
|
||||||
|
def log_index_started(url):
    """Print a colored console notice that *url* is being added to the search index.

    Uses the ANSI color map from archivebox.config for the {green}/{reset}
    escape codes, followed by a blank spacer line.
    """
    notice = '{green}[*] Indexing url: {} in the search index {reset}'.format(url, **ANSI)
    print(notice)
    print()
|
||||||
|
|
||||||
def get_file_result_content(res, extra_path, use_pwd=False):
|
def get_file_result_content(res, extra_path, use_pwd=False):
|
||||||
if use_pwd:
|
if use_pwd:
|
||||||
|
@ -12,7 +17,7 @@ def get_file_result_content(res, extra_path, use_pwd=False):
|
||||||
fpath = f'{fpath}/{extra_path}'
|
fpath = f'{fpath}/{extra_path}'
|
||||||
|
|
||||||
with open(fpath, 'r') as file:
|
with open(fpath, 'r') as file:
|
||||||
data = file.read().replace('\n', '')
|
data = file.read()
|
||||||
if data:
|
if data:
|
||||||
return [data]
|
return [data]
|
||||||
return []
|
return []
|
||||||
|
@ -28,6 +33,7 @@ def get_indexable_content(results: QuerySet):
|
||||||
if method not in ('readability', 'singlefile', 'dom', 'wget'):
|
if method not in ('readability', 'singlefile', 'dom', 'wget'):
|
||||||
return []
|
return []
|
||||||
# This should come from a plugin interface
|
# This should come from a plugin interface
|
||||||
|
|
||||||
if method == 'readability':
|
if method == 'readability':
|
||||||
return get_file_result_content(res, 'content.txt')
|
return get_file_result_content(res, 'content.txt')
|
||||||
elif method == 'singlefile':
|
elif method == 'singlefile':
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue