Mirror of https://github.com/ArchiveBox/ArchiveBox.git, synced 2025-05-29 05:55:28 -04:00
add new core and crawls statemachine manager
Some checks failed
Build Docker image / buildx (push) Has been cancelled
Build Homebrew package / build (push) Has been cancelled
Run linters / lint (push) Has been cancelled
Build Pip package / build (push) Has been cancelled
Run tests / python_tests (ubuntu-22.04, 3.11) (push) Has been cancelled
Run tests / docker_tests (push) Has been cancelled
Build Debian package / build (push) Has been cancelled
This commit is contained in:
parent 41efd010f0
commit 48f8416762
18 changed files with 798 additions and 374 deletions
archivebox/core/actors.py (new file, +73 lines)
@@ -0,0 +1,73 @@
+__package__ = 'archivebox.core'
+
+from typing import ClassVar
+
+from rich import print
+
+from django.db.models import QuerySet
+from django.utils import timezone
+from datetime import timedelta
+from core.models import Snapshot
+
+from actors.actor import ActorType
+
+
+class SnapshotActor(ActorType[Snapshot]):
+
+    QUERYSET: ClassVar[QuerySet] = Snapshot.objects.filter(status='queued')
+    CLAIM_WHERE: ClassVar[str] = 'status = "queued"'  # the WHERE clause to filter the objects when atomically getting the next object from the queue
+    CLAIM_SET: ClassVar[str] = 'status = "started"'   # the SET clause to claim the object when atomically getting the next object from the queue
+    CLAIM_ORDER: ClassVar[str] = 'created_at DESC'    # the ORDER BY clause to sort the objects with when atomically getting the next object from the queue
+    CLAIM_FROM_TOP: ClassVar[int] = 50                # the number of objects to consider when atomically getting the next object from the queue
+
+    # model_type: Type[ModelType]
+    MAX_CONCURRENT_ACTORS: ClassVar[int] = 4          # min 2, max 8, up to 60% of available cpu cores
+    MAX_TICK_TIME: ClassVar[int] = 60                 # maximum duration in seconds to process a single object
+
+    def claim_sql_where(self) -> str:
+        """override this to implement a custom WHERE clause for the atomic claim step e.g. "status = 'queued' AND locked_by = NULL" """
+        return self.CLAIM_WHERE
+
+    def claim_sql_set(self) -> str:
+        """override this to implement a custom SET clause for the atomic claim step e.g. "status = 'started' AND locked_by = {self.pid}" """
+        retry_at = timezone.now() + timedelta(seconds=self.MAX_TICK_TIME)
+        # format as 2024-10-31 10:14:33.240903
+        retry_at_str = retry_at.strftime('%Y-%m-%d %H:%M:%S.%f')
+        return f'{self.CLAIM_SET}, retry_at = {retry_at_str}'
+
+    def claim_sql_order(self) -> str:
+        """override this to implement a custom ORDER BY clause for the atomic claim step e.g. "created_at DESC" """
+        return self.CLAIM_ORDER
+
+    def claim_from_top(self) -> int:
+        """override this to implement a custom number of objects to consider when atomically claiming the next object from the top of the queue"""
+        return self.CLAIM_FROM_TOP
+
+    def tick(self, obj: Snapshot) -> None:
+        """override this to process the object"""
+        print(f'[blue]🏃‍♂️ {self}.tick()[/blue]', obj.abid or obj.id)
+        # For example:
+        # do_some_task(obj)
+        # do_something_else(obj)
+        # obj._model.objects.filter(pk=obj.pk, status='started').update(status='success')
+        # raise NotImplementedError('tick() must be implemented by the Actor subclass')
+
+    def on_shutdown(self, err: BaseException | None=None) -> None:
+        print(f'[grey53]🏃‍♂️ {self}.on_shutdown() SHUTTING DOWN[/grey53]', err or '[green](gracefully)[/green]')
+        # abx.pm.hook.on_actor_shutdown(self)
+
+    def on_tick_start(self, obj: Snapshot) -> None:
+        # print(f'🏃‍♂️ {self}.on_tick_start()', obj.abid or obj.id)
+        # abx.pm.hook.on_actor_tick_start(self, obj_to_process)
+        # self.timer = TimedProgress(self.MAX_TICK_TIME, prefix='      ')
+        pass
+
+    def on_tick_end(self, obj: Snapshot) -> None:
+        # print(f'🏃‍♂️ {self}.on_tick_end()', obj.abid or obj.id)
+        # abx.pm.hook.on_actor_tick_end(self, obj_to_process)
+        # self.timer.end()
+        pass
+
+    def on_tick_exception(self, obj: Snapshot, err: BaseException) -> None:
+        print(f'[red]🏃‍♂️ {self}.on_tick_exception()[/red]', obj.abid or obj.id, err)
+        # abx.pm.hook.on_actor_tick_exception(self, obj_to_process, err)
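
The CLAIM_WHERE / CLAIM_SET / CLAIM_ORDER / CLAIM_FROM_TOP class variables are raw SQL fragments; the ActorType base class that consumes them is not included in this diff. Below is a minimal sketch, assuming a SQLite backend with RETURNING support, of how such fragments could compose into a single atomic claim query. The build_claim_sql() helper and the core_snapshot table name are hypothetical illustrations, not the actual ActorType implementation:

def build_claim_sql(table: str, where: str, set_clause: str, order: str, top: int) -> str:
    # Hypothetical sketch: claim one object in a single atomic UPDATE.
    # Only the first `top` rows matching the WHERE clause are considered
    # (CLAIM_FROM_TOP), and one of those candidates is picked at random so
    # concurrent actors mostly grab different rows instead of all fighting
    # over the head of the queue.
    return f'''
        UPDATE {table}
        SET {set_clause}
        WHERE id = (
            SELECT id FROM (
                SELECT id FROM {table} WHERE {where} ORDER BY {order} LIMIT {top}
            ) ORDER BY random() LIMIT 1
        )
        RETURNING id
    '''

# e.g. build_claim_sql('core_snapshot', SnapshotActor.CLAIM_WHERE,
#                      SnapshotActor.CLAIM_SET, SnapshotActor.CLAIM_ORDER,
#                      SnapshotActor.CLAIM_FROM_TOP)

One caveat worth flagging: claim_sql_set() above interpolates the retry_at timestamp without quotes, so the resulting SET fragment is not a valid SQL string literal as written; presumably the consuming code (or a later commit) quotes it.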
archivebox/core/models.py

@@ -8,21 +8,25 @@ import os
 import json
 
 from pathlib import Path
+from datetime import timedelta
 
 from django.db import models
 from django.utils.functional import cached_property
 from django.utils.text import slugify
 from django.utils import timezone
 from django.core.cache import cache
 from django.urls import reverse, reverse_lazy
 from django.db.models import Case, When, Value, IntegerField
 from django.contrib import admin
 from django.conf import settings
 
+from statemachine.mixins import MachineMixin
+
 from archivebox.config import CONSTANTS
 
 from abid_utils.models import ABIDModel, ABIDField, AutoDateTimeField
 from queues.tasks import bg_archive_snapshot
-# from crawls.models import Crawl
+from crawls.models import Crawl
 # from machine.models import Machine, NetworkInterface
 
 from archivebox.misc.system import get_dir_size
@@ -152,7 +156,7 @@ class SnapshotManager(models.Manager):
         return super().get_queryset().prefetch_related('tags', 'archiveresult_set')  # .annotate(archiveresult_count=models.Count('archiveresult')).distinct()
 
 
-class Snapshot(ABIDModel):
+class Snapshot(ABIDModel, MachineMixin):
     abid_prefix = 'snp_'
     abid_ts_src = 'self.created_at'
     abid_uri_src = 'self.url'
@@ -160,6 +164,17 @@ class Snapshot(ABIDModel):
     abid_rand_src = 'self.id'
     abid_drift_allowed = True
 
+    state_field_name = 'status'
+    state_machine_name = 'core.statemachines.SnapshotMachine'
+    state_machine_attr = 'sm'
+
+    class SnapshotStatus(models.TextChoices):
+        QUEUED = 'queued', 'Queued'
+        STARTED = 'started', 'Started'
+        SEALED = 'sealed', 'Sealed'
+
+    status = models.CharField(max_length=15, default=SnapshotStatus.QUEUED, null=False, blank=False)
+
     id = models.UUIDField(primary_key=True, default=None, null=False, editable=False, unique=True, verbose_name='ID')
     abid = ABIDField(prefix=abid_prefix)
 
@@ -171,7 +186,7 @@ class Snapshot(ABIDModel):
     bookmarked_at = AutoDateTimeField(default=None, null=False, editable=True, db_index=True)
     downloaded_at = models.DateTimeField(default=None, null=True, editable=False, db_index=True, blank=True)
 
-    # crawl = models.ForeignKey(Crawl, on_delete=models.CASCADE, default=None, null=True, blank=True, related_name='snapshot_set')
+    crawl = models.ForeignKey(Crawl, on_delete=models.CASCADE, default=None, null=True, blank=True, related_name='snapshot_set')
 
     url = models.URLField(unique=True, db_index=True)
     timestamp = models.CharField(max_length=32, unique=True, db_index=True, editable=False)
@@ -396,6 +411,25 @@ class Snapshot(ABIDModel):
             tags_id.append(Tag.objects.get_or_create(name=tag)[0].pk)
         self.tags.clear()
         self.tags.add(*tags_id)
 
+    def has_pending_archiveresults(self) -> bool:
+        pending_statuses = [ArchiveResult.ArchiveResultStatus.QUEUED, ArchiveResult.ArchiveResultStatus.STARTED]
+        pending_archiveresults = self.archiveresult_set.filter(status__in=pending_statuses)
+        return pending_archiveresults.exists()
+
+    def create_pending_archiveresults(self) -> list['ArchiveResult']:
+        archiveresults = []
+        for extractor in EXTRACTORS:
+            archiveresult, _created = ArchiveResult.objects.get_or_create(
+                snapshot=self,
+                extractor=extractor,
+                status=ArchiveResult.ArchiveResultStatus.QUEUED,
+            )
+            archiveresults.append(archiveresult)
+        return archiveresults
+
+    def bump_retry_at(self, seconds: int = 10):
+        self.retry_at = timezone.now() + timedelta(seconds=seconds)
+
 
     # def get_storage_dir(self, create=True, symlink=True) -> Path:
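
These new helpers pair with the actor's claim step in actors.py above: claim_sql_set() stamps retry_at roughly MAX_TICK_TIME into the future when an object is claimed, and bump_retry_at() pushes that deadline further out while work is still in flight, so an object whose retry_at has lapsed can presumably be re-claimed by another actor (the re-claim scheduling itself is not part of this diff).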
@@ -452,6 +486,20 @@ class ArchiveResult(ABIDModel):
     abid_subtype_src = 'self.extractor'
     abid_rand_src = 'self.id'
     abid_drift_allowed = True
 
+    state_field_name = 'status'
+    state_machine_name = 'core.statemachines.ArchiveResultMachine'
+    state_machine_attr = 'sm'
+
+    class ArchiveResultStatus(models.TextChoices):
+        QUEUED = 'queued', 'Queued'
+        STARTED = 'started', 'Started'
+        SUCCEEDED = 'succeeded', 'Succeeded'
+        FAILED = 'failed', 'Failed'
+        SKIPPED = 'skipped', 'Skipped'
+        BACKOFF = 'backoff', 'Waiting to retry'
+
+    status = models.CharField(max_length=15, choices=ArchiveResultStatus.choices, default=ArchiveResultStatus.QUEUED, null=False, blank=False)
+
     EXTRACTOR_CHOICES = (
         ('htmltotext', 'htmltotext'),
@@ -469,11 +517,7 @@ class ArchiveResult(ABIDModel):
         ('title', 'title'),
         ('wget', 'wget'),
     )
-    STATUS_CHOICES = [
-        ("succeeded", "succeeded"),
-        ("failed", "failed"),
-        ("skipped", "skipped")
-    ]
+
 
     id = models.UUIDField(primary_key=True, default=None, null=False, editable=False, unique=True, verbose_name='ID')
     abid = ABIDField(prefix=abid_prefix)
@@ -491,7 +535,6 @@ class ArchiveResult(ABIDModel):
     output = models.CharField(max_length=1024)
     start_ts = models.DateTimeField(db_index=True)
     end_ts = models.DateTimeField()
-    status = models.CharField(max_length=16, choices=STATUS_CHOICES)
 
     # the network interface that was used to download this result
     # uplink = models.ForeignKey(NetworkInterface, on_delete=models.SET_NULL, null=True, blank=True, verbose_name='Network Interface Used')
@@ -552,7 +595,15 @@ class ArchiveResult(ABIDModel):
         return link.canonical_outputs().get(f'{self.extractor}_path')
 
     def output_exists(self) -> bool:
-        return os.access(self.output_path(), os.R_OK)
+        return os.path.exists(self.output_path())
+
+    def bump_retry_at(self, seconds: int = 10):
+        self.retry_at = timezone.now() + timedelta(seconds=seconds)
+
+    def create_output_dir(self):
+        snap_dir = self.snapshot_dir
+        snap_dir.mkdir(parents=True, exist_ok=True)
+        return snap_dir / self.output_path()
 
 
     # def get_storage_dir(self, create=True, symlink=True):
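
Note that the rewritten output_exists() is a subtle behavior change: os.path.exists() only checks that the output path exists, whereas the old os.access(..., os.R_OK) additionally required it to be readable by the current user.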
archivebox/core/settings.py

@@ -64,7 +64,8 @@ INSTALLED_APPS = [
     # 'abid_utils',       # handles ABID ID creation, handling, and models
     'config',             # ArchiveBox config settings (loaded as a plugin, don't need to add it here)
     'machine',            # handles collecting and storing information about the host machine, network interfaces, installed binaries, etc.
-    'queues',             # handles starting and managing background workers and processes
+    'actors',             # handles starting and managing background workers and processes (orchestrators and actors)
+    'queues',             # handles starting and managing background workers and processes (supervisord)
     'seeds',              # handles Seed model and URL source management
     'crawls',             # handles Crawl and CrawlSchedule models and management
     'personas',           # handles Persona and session management
archivebox/core/statemachines.py (new file, +115 lines)
@@ -0,0 +1,115 @@
+__package__ = 'archivebox.snapshots'
+
+from django.utils import timezone
+
+from statemachine import State, StateMachine
+
+from core.models import Snapshot, ArchiveResult
+
+# State Machine Definitions
+#################################################
+
+
+class SnapshotMachine(StateMachine, strict_states=True):
+    """State machine for managing Snapshot lifecycle."""
+
+    model: Snapshot
+
+    # States
+    queued = State(value=Snapshot.SnapshotStatus.QUEUED, initial=True)
+    started = State(value=Snapshot.SnapshotStatus.STARTED)
+    sealed = State(value=Snapshot.SnapshotStatus.SEALED, final=True)
+
+    # Tick Event
+    tick = (
+        queued.to.itself(unless='can_start', internal=True) |
+        queued.to(started, cond='can_start') |
+        started.to.itself(unless='is_finished', internal=True) |
+        started.to(sealed, cond='is_finished')
+    )
+
+    def __init__(self, snapshot, *args, **kwargs):
+        self.snapshot = snapshot
+        super().__init__(snapshot, *args, **kwargs)
+
+    def can_start(self) -> bool:
+        return self.snapshot.seed and self.snapshot.seed.uri
+
+    def is_finished(self) -> bool:
+        return not self.snapshot.has_pending_archiveresults()
+
+    def on_started(self):
+        self.snapshot.create_pending_archiveresults()
+        self.snapshot.bump_retry_at(seconds=60)
+        self.snapshot.save()
+
+    def on_sealed(self):
+        self.snapshot.retry_at = None
+        self.snapshot.save()
+
+
+class ArchiveResultMachine(StateMachine, strict_states=True):
+    """State machine for managing ArchiveResult lifecycle."""
+
+    model: ArchiveResult
+
+    # States
+    queued = State(value=ArchiveResult.ArchiveResultStatus.QUEUED, initial=True)
+    started = State(value=ArchiveResult.ArchiveResultStatus.STARTED)
+    backoff = State(value=ArchiveResult.ArchiveResultStatus.BACKOFF)
+    succeeded = State(value=ArchiveResult.ArchiveResultStatus.SUCCEEDED, final=True)
+    failed = State(value=ArchiveResult.ArchiveResultStatus.FAILED, final=True)
+
+    # Tick Event
+    tick = (
+        queued.to.itself(unless='can_start', internal=True) |
+        queued.to(started, cond='can_start') |
+        started.to.itself(unless='is_finished', internal=True) |
+        started.to(succeeded, cond='is_succeeded') |
+        started.to(failed, cond='is_failed') |
+        started.to(backoff, cond='is_backoff') |
+        backoff.to.itself(unless='can_start', internal=True) |
+        backoff.to(started, cond='can_start') |
+        backoff.to(succeeded, cond='is_succeeded') |
+        backoff.to(failed, cond='is_failed')
+    )
+
+    def __init__(self, archiveresult, *args, **kwargs):
+        self.archiveresult = archiveresult
+        super().__init__(archiveresult, *args, **kwargs)
+
+    def can_start(self) -> bool:
+        return self.archiveresult.snapshot and self.archiveresult.snapshot.is_started()
+
+    def is_succeeded(self) -> bool:
+        return self.archiveresult.output_exists()
+
+    def is_failed(self) -> bool:
+        return not self.archiveresult.output_exists()
+
+    def is_backoff(self) -> bool:
+        return self.archiveresult.status == ArchiveResult.ArchiveResultStatus.BACKOFF
+
+    def on_started(self):
+        self.archiveresult.start_ts = timezone.now()
+        self.archiveresult.create_output_dir()
+        self.archiveresult.bump_retry_at(seconds=60)
+        self.archiveresult.save()
+
+    def on_backoff(self):
+        self.archiveresult.bump_retry_at(seconds=60)
+        self.archiveresult.save()
+
+    def on_succeeded(self):
+        self.archiveresult.end_ts = timezone.now()
+        self.archiveresult.save()
+
+    def on_failed(self):
+        self.archiveresult.end_ts = timezone.now()
+        self.archiveresult.save()
+
+    def after_transition(self, event: str, source: State, target: State):
+        print(f"after '{event}' from '{source.id}' to '{target.id}'")
+        # self.archiveresult.save_merkle_index()
+        # self.archiveresult.save_html_index()
+        # self.archiveresult.save_json_index()
+        return "after_transition"
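
Because both models set state_machine_attr = 'sm', MachineMixin (from the python-statemachine package) attaches the corresponding machine to each model instance as .sm and keeps the status field named by state_field_name in sync with the machine state. A minimal usage sketch, assuming migrations for the new status/retry_at fields have been applied; the orchestrator loop that normally drives these events is not part of this diff:

from core.models import Snapshot

snapshot = Snapshot.objects.filter(status=Snapshot.SnapshotStatus.QUEUED).first()
if snapshot:
    snapshot.sm.send('tick')   # internal no-op transition unless can_start() is truthy
    print(snapshot.status)     # 'started' once its seed URI is set, otherwise still 'queued'

Each tick either leaves an object in place (the internal self-transitions) or advances it exactly one state, so repeatedly ticking queued and started objects until they reach a final state is the whole scheduling loop.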