path: root/src/apscheduler/schedulers/sync.py
from __future__ import annotations

import os
import platform
import random
import threading
from concurrent.futures import FIRST_COMPLETED, Future, ThreadPoolExecutor, wait
from contextlib import ExitStack
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
from typing import Any, Callable, Iterable, Mapping, Optional
from uuid import UUID, uuid4

import attr

from ..abc import DataStore, EventSource, Trigger
from ..context import current_scheduler
from ..datastores.memory import MemoryDataStore
from ..enums import CoalescePolicy, ConflictPolicy, JobOutcome, RunState
from ..eventbrokers.local import LocalEventBroker
from ..events import (
    Event, JobReleased, ScheduleAdded, SchedulerStarted, SchedulerStopped, ScheduleUpdated)
from ..exceptions import JobCancelled, JobDeadlineMissed, JobLookupError
from ..marshalling import callable_to_ref
from ..structures import Job, JobResult, Schedule, Task
from ..workers.sync import Worker

_microsecond_delta = timedelta(microseconds=1)
_zero_timedelta = timedelta()


@attr.define(eq=False)
class Scheduler:
    """A synchronous scheduler implementation."""

    data_store: DataStore = attr.field(factory=MemoryDataStore)
    identity: str = attr.field(kw_only=True, default=None)
    start_worker: bool = attr.field(kw_only=True, default=True)
    logger: Optional[Logger] = attr.field(kw_only=True, default=getLogger(__name__))

    _state: RunState = attr.field(init=False, default=RunState.stopped)
    _wakeup_event: threading.Event = attr.field(init=False)
    _worker: Optional[Worker] = attr.field(init=False, default=None)
    _events: LocalEventBroker = attr.field(init=False, factory=LocalEventBroker)
    _exit_stack: ExitStack = attr.field(init=False)

    def __attrs_post_init__(self) -> None:
        if not self.identity:
            self.identity = f'{platform.node()}-{os.getpid()}-{id(self)}'

    @property
    def events(self) -> EventSource:
        return self._events

    @property
    def state(self) -> RunState:
        return self._state

    @property
    def worker(self) -> Optional[Worker]:
        return self._worker

    def __enter__(self) -> Scheduler:
        self._state = RunState.starting
        self._wakeup_event = threading.Event()
        self._exit_stack = ExitStack()
        self._exit_stack.__enter__()
        self._exit_stack.enter_context(self._events)

        # Initialize the data store and start relaying events to the scheduler's event broker
        self._exit_stack.enter_context(self.data_store)
        self._exit_stack.enter_context(self.data_store.events.subscribe(self._events.publish))

        # Wake up the scheduler if the data store emits a significant schedule event
        self._exit_stack.enter_context(
            self.data_store.events.subscribe(
                lambda event: self._wakeup_event.set(), {ScheduleAdded, ScheduleUpdated}
            )
        )

        # Start the built-in worker, if configured to do so
        if self.start_worker:
            token = current_scheduler.set(self)
            try:
                self._worker = Worker(self.data_store)
                self._exit_stack.enter_context(self._worker)
            finally:
                current_scheduler.reset(token)

        # Start the scheduler and return when it has signalled readiness or raised an exception
        start_future: Future[Event] = Future()
        with self._events.subscribe(start_future.set_result, one_shot=True):
            executor = ThreadPoolExecutor(1)
            self._exit_stack.push(lambda exc_type, *args: executor.shutdown(wait=exc_type is None))
            run_future = executor.submit(self.run)
            wait([start_future, run_future], return_when=FIRST_COMPLETED)

        if run_future.done():
            run_future.result()

        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._state = RunState.stopping
        self._wakeup_event.set()
        self._exit_stack.__exit__(exc_type, exc_val, exc_tb)
        self._state = RunState.stopped
        del self._wakeup_event

    def add_schedule(
        self, func_or_task_id: str | Callable, trigger: Trigger, *, id: Optional[str] = None,
        args: Optional[Iterable] = None, kwargs: Optional[Mapping[str, Any]] = None,
        coalesce: CoalescePolicy = CoalescePolicy.latest,
        misfire_grace_time: float | timedelta | None = None,
        max_jitter: float | timedelta | None = None, tags: Optional[Iterable[str]] = None,
        conflict_policy: ConflictPolicy = ConflictPolicy.do_nothing
    ) -> str:
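        """
        Add a schedule for running a task based on the given trigger.

        :param func_or_task_id: either a callable or the ID of an existing task definition
        :param trigger: the trigger that determines the run times of the schedule
        :param id: an explicit identifier for the schedule (a random one is generated if omitted)
        :param args: positional arguments to call the target callable with
        :param kwargs: keyword arguments to call the target callable with
        :param coalesce: determines what to do when several run times have accumulated before
            processing (run once for the earliest, the latest, or once for all of them)
        :param misfire_grace_time: maximum time (in seconds, or as a timedelta) a scheduled run
            time may be missed before the resulting job is considered to have missed its start
            deadline
        :param max_jitter: maximum random delay (in seconds, or as a timedelta) added to each
            calculated run time
        :param tags: strings to associate with the jobs created from this schedule
        :param conflict_policy: determines what to do if a schedule with the same ID already
            exists in the data store
        :return: the ID of the newly added schedule

        """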
        id = id or str(uuid4())
        args = tuple(args or ())
        kwargs = dict(kwargs or {})
        tags = frozenset(tags or ())
        if isinstance(misfire_grace_time, (int, float)):
            misfire_grace_time = timedelta(seconds=misfire_grace_time)

        if callable(func_or_task_id):
            task = Task(id=callable_to_ref(func_or_task_id), func=func_or_task_id)
            self.data_store.add_task(task)
        else:
            task = self.data_store.get_task(func_or_task_id)

        schedule = Schedule(id=id, task_id=task.id, trigger=trigger, args=args, kwargs=kwargs,
                            coalesce=coalesce, misfire_grace_time=misfire_grace_time,
                            max_jitter=max_jitter, tags=tags)
        schedule.next_fire_time = trigger.next()
        self.data_store.add_schedule(schedule, conflict_policy)
        self.logger.info('Added new schedule (task=%r, trigger=%r); next run time at %s', task,
                         trigger, schedule.next_fire_time)
        return schedule.id

    def get_schedule(self, id: str) -> Schedule:
        schedules = self.data_store.get_schedules({id})
        return schedules[0]

    def remove_schedule(self, schedule_id: str) -> None:
        self.data_store.remove_schedules({schedule_id})

    def add_job(
        self, func_or_task_id: str | Callable, *, args: Optional[Iterable] = None,
        kwargs: Optional[Mapping[str, Any]] = None, tags: Optional[Iterable[str]] = None
    ) -> UUID:
        """
        Add a job to the data store.

        :param func_or_task_id: either a callable or the ID of an existing task definition
        :param args: positional arguments to call the target callable with
        :param kwargs: keyword arguments to call the target callable with
        :param tags: strings to associate with the job
        :return: the ID of the newly created job

        """
        if callable(func_or_task_id):
            task = Task(id=callable_to_ref(func_or_task_id), func=func_or_task_id)
            self.data_store.add_task(task)
        else:
            task = self.data_store.get_task(func_or_task_id)

        job = Job(task_id=task.id, args=args or (), kwargs=kwargs or {}, tags=tags or frozenset())
        self.data_store.add_job(job)
        return job.id
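
    # Fire-and-forget usage sketch (illustrative only; ``tick`` is a hypothetical callable):
    #
    #     with Scheduler() as scheduler:
    #         job_id = scheduler.add_job(tick)
    #         result = scheduler.get_job_result(job_id)  # blocks until the job has finished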

    def get_job_result(self, job_id: UUID, *, wait: bool = True) -> JobResult:
        """
        Retrieve the result of a job.

        :param job_id: the ID of the job
        :param wait: if ``True``, wait until the job has finished (one way or another); if
                     ``False``, raise an exception if the result is not yet available
        :raises JobLookupError: if the job does not exist in the data store

        """
        wait_event = threading.Event()

        def listener(event: JobReleased) -> None:
            if event.job_id == job_id:
                wait_event.set()

        with self.data_store.events.subscribe(listener, {JobReleased}):
            result = self.data_store.get_job_result(job_id)
            if result:
                return result
            elif not wait:
                raise JobLookupError(job_id)

            wait_event.wait()

        result = self.data_store.get_job_result(job_id)
        assert isinstance(result, JobResult)
        return result

    def run_job(
        self, func_or_task_id: str | Callable, *, args: Optional[Iterable] = None,
        kwargs: Optional[Mapping[str, Any]] = None, tags: Optional[Iterable[str]] = ()
    ) -> Any:
        """
        Convenience method to add a job and then return its result (or raise its exception).

        :returns: the return value of the target function

        """
        job_complete_event = threading.Event()

        def listener(event: JobReleased) -> None:
            if event.job_id == job_id:
                job_complete_event.set()

        job_id: Optional[UUID] = None
        with self.data_store.events.subscribe(listener, {JobReleased}):
            job_id = self.add_job(func_or_task_id, args=args, kwargs=kwargs, tags=tags)
            job_complete_event.wait()

        result = self.get_job_result(job_id)
        if result.outcome is JobOutcome.success:
            return result.return_value
        elif result.outcome is JobOutcome.error:
            raise result.exception
        elif result.outcome is JobOutcome.missed_start_deadline:
            raise JobDeadlineMissed
        elif result.outcome is JobOutcome.cancelled:
            raise JobCancelled
        else:
            raise RuntimeError(f'Unknown job outcome: {result.outcome}')

    def run(self) -> None:
        if self._state is not RunState.starting:
            raise RuntimeError(f'This function cannot be called while the scheduler is in the '
                               f'{self._state} state')

        # Signal that the scheduler has started
        self._state = RunState.started
        self._events.publish(SchedulerStarted())

        try:
            while self._state is RunState.started:
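                # Acquire a batch of due schedules (up to 100), claiming them for this scheduler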
                schedules = self.data_store.acquire_schedules(self.identity, 100)
                now = datetime.now(timezone.utc)
                for schedule in schedules:
                    # Calculate a next fire time for the schedule, if possible
                    fire_times = [schedule.next_fire_time]
                    calculate_next = schedule.trigger.next
                    while True:
                        try:
                            fire_time = calculate_next()
                        except Exception:
                            self.logger.exception(
                                'Error computing next fire time for schedule %r of task %r – '
                                'removing schedule', schedule.id, schedule.task_id)
                            break

                        # Stop if the calculated fire time is in the future
                        if fire_time is None or fire_time > now:
                            schedule.next_fire_time = fire_time
                            break

                        # Only keep all the fire times if coalesce policy = "all"
                        if schedule.coalesce is CoalescePolicy.all:
                            fire_times.append(fire_time)
                        elif schedule.coalesce is CoalescePolicy.latest:
                            fire_times[0] = fire_time

                    # Add one or more jobs to the job queue
                    max_jitter = schedule.max_jitter.total_seconds() if schedule.max_jitter else 0
                    for i, fire_time in enumerate(fire_times):
                        # Calculate a jitter if max_jitter > 0
                        jitter = _zero_timedelta
                        if max_jitter:
                            if i + 1 < len(fire_times):
                                next_fire_time = fire_times[i + 1]
                            else:
                                next_fire_time = schedule.next_fire_time

                            if next_fire_time is not None:
                                # Jitter must never be so high that it would cause a fire time to
                                # equal or exceed the next fire time
                                jitter_s = min([
                                    max_jitter,
                                    (next_fire_time - fire_time
                                     - _microsecond_delta).total_seconds()
                                ])
                                jitter = timedelta(seconds=random.uniform(0, jitter_s))
                                fire_time += jitter

                        schedule.last_fire_time = fire_time
                        job = Job(task_id=schedule.task_id, args=schedule.args,
                                  kwargs=schedule.kwargs, schedule_id=schedule.id,
                                  scheduled_fire_time=fire_time, jitter=jitter,
                                  start_deadline=schedule.next_deadline, tags=schedule.tags)
                        self.data_store.add_job(job)

                # Update the schedules (and release the scheduler's claim on them)
                self.data_store.release_schedules(self.identity, schedules)

                # If we received fewer schedules than the maximum amount, sleep until the next
                # schedule is due or the scheduler is explicitly woken up
                wait_time = None
                if len(schedules) < 100:
                    next_fire_time = self.data_store.get_next_schedule_run_time()
                    if next_fire_time:
                        wait_time = (next_fire_time - datetime.now(timezone.utc)).total_seconds()

                if self._wakeup_event.wait(wait_time):
                    self._wakeup_event = threading.Event()
        except BaseException as exc:
            self._state = RunState.stopped
            self._events.publish(SchedulerStopped(exception=exc))
            raise

        self._state = RunState.stopped
        self._events.publish(SchedulerStopped())

    # def stop(self) -> None:
    #     self.portal.call(self._scheduler.stop)
    #
    # def wait_until_stopped(self) -> None:
    #     self.portal.call(self._scheduler.wait_until_stopped)