author     Alex Grönholm <alex.gronholm@nextday.fi>  2022-01-02 15:25:38 +0200
committer  Alex Grönholm <alex.gronholm@nextday.fi>  2022-01-02 15:25:38 +0200
commit     a0e1647117225b10e3ee96f9a895ecf2fea40eaf (patch)
tree       216cba565b7fb9d1e0ecc49cb3bce6c99dc73fb9
parent     ab8c32e0a925b6dff2974f32f645c839cdb000e0 (diff)
download   apscheduler-a0e1647117225b10e3ee96f9a895ecf2fea40eaf.tar.gz
Upgraded syntax and switched linting over to pre-commit.ci
Linting checks are now run by pre-commit.ci instead of GitHub Actions.
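Most of the churn below is mechanical: pyupgrade (run with --py37-plus) and isort (configured to prepend "from __future__ import annotations") rewrite typing.Optional[X] annotations into the PEP 604 form X | None. That spelling is safe on Python 3.7-3.9 only because the future import turns annotations into strings that are never evaluated at runtime. A minimal illustrative sketch of the before/after pattern (not taken from the diff):

    from __future__ import annotations

    from datetime import datetime

    # Before: Optional[datetime] required "from typing import Optional".
    # After: the union form below needs no typing import, and the future
    # import keeps it valid on Python 3.7-3.9 by deferring evaluation.
    def next_run_time(previous: datetime | None) -> datetime | None:
        return previous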
-rw-r--r--  .github/workflows/test.yml (renamed from .github/workflows/codeqa-test.yml)  25
-rw-r--r--  .gitignore  1
-rw-r--r--  .pre-commit-config.yaml  49
-rw-r--r--  docs/conf.py  204
-rw-r--r--  docs/modules/executors/base.rst  1
-rw-r--r--  docs/userguide.rst  6
-rw-r--r--  examples/executors/processpool.py  6
-rw-r--r--  examples/misc/reference.py  5
-rw-r--r--  examples/rpc/client.py  3
-rw-r--r--  examples/rpc/server.py  2
-rw-r--r--  examples/schedulers/async_.py  3
-rw-r--r--  examples/schedulers/sync.py  2
-rw-r--r--  src/apscheduler/abc.py  26
-rw-r--r--  src/apscheduler/converters.py  3
-rw-r--r--  src/apscheduler/datastores/async_adapter.py  12
-rw-r--r--  src/apscheduler/datastores/async_sqlalchemy.py  12
-rw-r--r--  src/apscheduler/datastores/memory.py  30
-rw-r--r--  src/apscheduler/datastores/mongodb.py  12
-rw-r--r--  src/apscheduler/datastores/sqlalchemy.py  16
-rw-r--r--  src/apscheduler/enums.py  2
-rw-r--r--  src/apscheduler/eventbrokers/async_local.py  2
-rw-r--r--  src/apscheduler/eventbrokers/base.py  12
-rw-r--r--  src/apscheduler/eventbrokers/local.py  6
-rw-r--r--  src/apscheduler/eventbrokers/mqtt.py  4
-rw-r--r--  src/apscheduler/events.py  11
-rw-r--r--  src/apscheduler/exceptions.py  2
-rw-r--r--  src/apscheduler/schedulers/async_.py  26
-rw-r--r--  src/apscheduler/schedulers/sync.py  24
-rw-r--r--  src/apscheduler/serializers/pickle.py  2
-rw-r--r--  src/apscheduler/structures.py  46
-rw-r--r--  src/apscheduler/triggers/calendarinterval.py  6
-rw-r--r--  src/apscheduler/triggers/combining.py  20
-rw-r--r--  src/apscheduler/triggers/cron/__init__.py  10
-rw-r--r--  src/apscheduler/triggers/cron/expressions.py  7
-rw-r--r--  src/apscheduler/triggers/cron/fields.py  6
-rw-r--r--  src/apscheduler/triggers/date.py  4
-rw-r--r--  src/apscheduler/triggers/interval.py  8
-rw-r--r--  src/apscheduler/validators.py  12
-rw-r--r--  src/apscheduler/workers/async_.py  6
-rw-r--r--  src/apscheduler/workers/sync.py  6
-rw-r--r--  tests/conftest.py  7
-rw-r--r--  tests/test_datastores.py  4
-rw-r--r--  tests/test_eventbrokers.py  2
-rw-r--r--  tests/test_marshalling.py  6
-rw-r--r--  tests/test_schedulers.py  7
-rw-r--r--  tests/test_workers.py  2
-rw-r--r--  tox.ini  17
47 files changed, 291 insertions, 394 deletions
diff --git a/.github/workflows/codeqa-test.yml b/.github/workflows/test.yml
index 6bc016e..c29ada8 100644
--- a/.github/workflows/codeqa-test.yml
+++ b/.github/workflows/test.yml
@@ -1,4 +1,4 @@
-name: Python codeqa/test
+name: Run the test suite
on:
push:
@@ -6,29 +6,7 @@ on:
pull_request:
jobs:
- lint:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - name: Set up Python
- uses: actions/setup-python@v2
- with:
- python-version: 3.x
- - uses: actions/cache@v2
- with:
- path: ~/.cache/pip
- key: pip-lint
- - name: Install dependencies
- run: pip install pyproject-flake8 isort mypy
- - name: Run flake8
- run: pflake8 src tests
-# - name: Check types with Mypy
-# run: mypy src tests
- - name: Run isort
- run: isort -c src tests
-
test-linux:
- needs: [lint]
strategy:
fail-fast: false
matrix:
@@ -48,7 +26,6 @@ jobs:
run: pytest
test-others:
- needs: [lint]
strategy:
fail-fast: false
matrix:
diff --git a/.gitignore b/.gitignore
index 2e07730..fe20c48 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,5 @@
.project
.pydevproject
-.pre-commit-config.yaml
.idea/
.coverage
.cache/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..c94bdd6
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,49 @@
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.1.0
+ hooks:
+ - id: check-added-large-files
+ - id: check-case-conflict
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: check-toml
+ - id: check-yaml
+ - id: debug-statements
+ - id: end-of-file-fixer
+ - id: mixed-line-ending
+ args: ["--fix=lf"]
+ - id: trailing-whitespace
+
+- repo: https://github.com/pycqa/isort
+ rev: 5.10.1
+ hooks:
+ - id: isort
+ args: ["-a", "from __future__ import annotations"]
+
+- repo: https://github.com/asottile/pyupgrade
+ rev: v2.30.1
+ hooks:
+ - id: pyupgrade
+ args: ["--py37-plus"]
+
+- repo: https://github.com/csachs/pyproject-flake8
+ rev: v0.0.1a2.post1
+ hooks:
+ - id: pyproject-flake8
+ additional_dependencies: [flake8-bugbear]
+
+- repo: https://github.com/codespell-project/codespell
+ rev: v2.1.0
+ hooks:
+ - id: codespell
+
+- repo: https://github.com/pre-commit/pygrep-hooks
+ rev: v1.9.0
+ hooks:
+ - id: python-check-blanket-noqa
+ - id: python-check-blanket-type-ignore
+ - id: python-no-eval
+ - id: python-use-type-annotations
+ - id: rst-backticks
+ - id: rst-directive-colons
+ - id: rst-inline-touching-normal
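With this file in place, the checks that pre-commit.ci runs can also be reproduced locally. Assuming pre-commit itself has been installed (e.g. via pip), the standard invocations are:

    pre-commit install          # run the hooks automatically on each git commit
    pre-commit run --all-files  # run every configured hook across the whole tree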
diff --git a/docs/conf.py b/docs/conf.py
index 38eb45b..2b78efa 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,198 +1,58 @@
-# -*- coding: utf-8 -*-
+# Configuration file for the Sphinx documentation builder.
#
-# APScheduler documentation build configuration file, created by
-# sphinx-quickstart on Fri Jul 31 02:56:30 2009.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
-import apscheduler
+from __future__ import annotations
+# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.append(os.path.abspath('.'))
-
-# -- General configuration -----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'APScheduler'
-copyright = u'Alex Grönholm'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
#
-# The short X.Y version.
-version = apscheduler.version
-# The full version, including alpha/beta/rc tags.
-release = apscheduler.release
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = ['_build', 'build', '.tox', '.git', 'examples']
+# -- Project information -----------------------------------------------------
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+project = 'APScheduler'
+copyright = 'Alex Grönholm'
+author = 'Alex Grönholm'
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
+# -- General configuration ---------------------------------------------------
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.intersphinx'
+]
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = []
-autodoc_member_order = 'alphabetical'
-# -- Options for HTML output ---------------------------------------------------
+# -- Options for HTML output -------------------------------------------------
-# The theme to use for HTML and HTML Help pages. Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
html_theme = 'sphinx_rtd_theme'
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_use_modindex = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'APSchedulerdoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('index', 'APScheduler.tex', u'APScheduler Documentation',
- u'Alex Grönholm', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_use_modindex = True
+html_static_path = ['_static']
intersphinx_mapping = {'python': ('https://docs.python.org/', None),
'sqlalchemy': ('http://docs.sqlalchemy.org/en/latest/', None)}
diff --git a/docs/modules/executors/base.rst b/docs/modules/executors/base.rst
index 5c4bfe7..255d7e6 100644
--- a/docs/modules/executors/base.rst
+++ b/docs/modules/executors/base.rst
@@ -8,4 +8,3 @@ Module Contents
.. autoclass:: BaseExecutor
:members:
-
diff --git a/docs/userguide.rst b/docs/userguide.rst
index 7061b0c..3139857 100644
--- a/docs/userguide.rst
+++ b/docs/userguide.rst
@@ -422,9 +422,9 @@ job's ``misfire_grace_time`` option (which can be set on per-job basis or global
scheduler) to see if the execution should still be triggered. This can lead into the job being
executed several times in succession.
-If this behavior is undesirable for your particular use case, it is possible to use `coalescing` to
-roll all these missed executions into one. In other words, if coalescing is enabled for the job and
-the scheduler sees one or more queued executions for the job, it will only trigger it once. No
+If this behavior is undesirable for your particular use case, it is possible to use ``coalescing``
+to roll all these missed executions into one. In other words, if coalescing is enabled for the job
+and the scheduler sees one or more queued executions for the job, it will only trigger it once. No
misfire events will be sent for the "bypassed" runs.
.. note::
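As a hedged illustration of the coalescing option discussed above, using the 3.x-style add_job() API that this user guide and the examples in this commit use (tick is a placeholder job function):

    from apscheduler.schedulers.blocking import BlockingScheduler

    scheduler = BlockingScheduler()
    # coalesce=True rolls queued missed runs into a single execution;
    # misfire_grace_time (in seconds) bounds how late a run may still fire.
    scheduler.add_job(tick, 'interval', seconds=3, coalesce=True,
                      misfire_grace_time=30)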
diff --git a/examples/executors/processpool.py b/examples/executors/processpool.py
index 37f58bb..c2b19d5 100644
--- a/examples/executors/processpool.py
+++ b/examples/executors/processpool.py
@@ -2,8 +2,10 @@
Demonstrates how to schedule a job to be run in a process pool on 3 second intervals.
"""
-from datetime import datetime
+from __future__ import annotations
+
import os
+from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
@@ -16,7 +18,7 @@ if __name__ == '__main__':
scheduler = BlockingScheduler()
scheduler.add_executor('processpool')
scheduler.add_job(tick, 'interval', seconds=3)
- print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
+ print('Press Ctrl+{} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.initialize()
diff --git a/examples/misc/reference.py b/examples/misc/reference.py
index 1479d2f..0005e1a 100644
--- a/examples/misc/reference.py
+++ b/examples/misc/reference.py
@@ -2,15 +2,16 @@
Basic example showing how to schedule a callable using a textual reference.
"""
+from __future__ import annotations
+
import os
from apscheduler.schedulers.blocking import BlockingScheduler
-
if __name__ == '__main__':
scheduler = BlockingScheduler()
scheduler.add_job('sys:stdout.write', 'interval', seconds=3, args=['tick\n'])
- print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
+ print('Press Ctrl+{} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.initialize()
diff --git a/examples/rpc/client.py b/examples/rpc/client.py
index 9393654..234446b 100644
--- a/examples/rpc/client.py
+++ b/examples/rpc/client.py
@@ -6,11 +6,12 @@ Then it schedules a job to run on 2 second intervals and sleeps for 10 seconds.
After that, it unschedules the job and exits.
"""
+from __future__ import annotations
+
from time import sleep
import rpyc
-
conn = rpyc.connect('localhost', 12345)
job = conn.root.add_job('server:print_text', 'interval', args=['Hello, World'], seconds=2)
sleep(10)
diff --git a/examples/rpc/server.py b/examples/rpc/server.py
index 4c7c705..f3a195c 100644
--- a/examples/rpc/server.py
+++ b/examples/rpc/server.py
@@ -7,6 +7,8 @@ To run, first install RPyC using pip. Then change the working directory to the `
and run it with ``python -m server``.
"""
+from __future__ import annotations
+
import rpyc
from rpyc.utils.server import ThreadedServer
diff --git a/examples/schedulers/async_.py b/examples/schedulers/async_.py
index ab1784f..5ea9dca 100644
--- a/examples/schedulers/async_.py
+++ b/examples/schedulers/async_.py
@@ -1,6 +1,9 @@
+from __future__ import annotations
+
import logging
import anyio
+
from apscheduler.schedulers.async_ import AsyncScheduler
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.workers.async_ import AsyncWorker
diff --git a/examples/schedulers/sync.py b/examples/schedulers/sync.py
index 7c7546b..369c535 100644
--- a/examples/schedulers/sync.py
+++ b/examples/schedulers/sync.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import logging
from apscheduler.schedulers.sync import Scheduler
diff --git a/src/apscheduler/abc.py b/src/apscheduler/abc.py
index 58d74cd..929a8cc 100644
--- a/src/apscheduler/abc.py
+++ b/src/apscheduler/abc.py
@@ -3,7 +3,7 @@ from __future__ import annotations
from abc import ABCMeta, abstractmethod
from base64 import b64decode, b64encode
from datetime import datetime
-from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator, Optional
+from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator
from uuid import UUID
from .enums import ConflictPolicy
@@ -19,7 +19,7 @@ class Trigger(Iterator[datetime], metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
- def next(self) -> Optional[datetime]:
+ def next(self) -> datetime | None:
"""
Return the next datetime to fire on.
@@ -92,7 +92,7 @@ class EventSource(metaclass=ABCMeta):
@abstractmethod
def subscribe(
self, callback: Callable[[Event], Any],
- event_types: Optional[Iterable[type[Event]]] = None,
+ event_types: Iterable[type[Event]] | None = None,
*,
one_shot: bool = False
) -> Subscription:
@@ -200,7 +200,7 @@ class DataStore:
"""
@abstractmethod
- def get_schedules(self, ids: Optional[set[str]] = None) -> list[Schedule]:
+ def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
"""
Get schedules from the data store.
@@ -249,7 +249,7 @@ class DataStore:
"""
@abstractmethod
- def get_next_schedule_run_time(self) -> Optional[datetime]:
+ def get_next_schedule_run_time(self) -> datetime | None:
"""
Return the earliest upcoming run time of all the schedules in the store, or ``None`` if
there are no active schedules.
@@ -264,7 +264,7 @@ class DataStore:
"""
@abstractmethod
- def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> list[Job]:
+ def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
"""
Get the list of pending jobs.
@@ -273,7 +273,7 @@ class DataStore:
"""
@abstractmethod
- def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> list[Job]:
+ def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
"""
Acquire unclaimed jobs for execution.
@@ -296,7 +296,7 @@ class DataStore:
"""
@abstractmethod
- def get_job_result(self, job_id: UUID) -> Optional[JobResult]:
+ def get_job_result(self, job_id: UUID) -> JobResult | None:
"""
Retrieve the result of a job.
@@ -358,7 +358,7 @@ class AsyncDataStore:
"""
@abstractmethod
- async def get_schedules(self, ids: Optional[set[str]] = None) -> list[Schedule]:
+ async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
"""
Get schedules from the data store.
@@ -407,7 +407,7 @@ class AsyncDataStore:
"""
@abstractmethod
- async def get_next_schedule_run_time(self) -> Optional[datetime]:
+ async def get_next_schedule_run_time(self) -> datetime | None:
"""
Return the earliest upcoming run time of all the schedules in the store, or ``None`` if
there are no active schedules.
@@ -422,7 +422,7 @@ class AsyncDataStore:
"""
@abstractmethod
- async def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> list[Job]:
+ async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
"""
Get the list of pending jobs.
@@ -431,7 +431,7 @@ class AsyncDataStore:
"""
@abstractmethod
- async def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> list[Job]:
+ async def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
"""
Acquire unclaimed jobs for execution.
@@ -454,7 +454,7 @@ class AsyncDataStore:
"""
@abstractmethod
- async def get_job_result(self, job_id: UUID) -> Optional[JobResult]:
+ async def get_job_result(self, job_id: UUID) -> JobResult | None:
"""
Retrieve the result of a job.
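The abstract next() signature above shows the shape every trigger now declares: each call returns the next fire time, or None once the trigger is exhausted. A minimal sketch of a conforming implementation (a hypothetical one-shot trigger, not part of this commit):

    from __future__ import annotations

    from datetime import datetime


    class OneShotTrigger:
        """Fires exactly once, then reports exhaustion with None."""

        def __init__(self, fire_time: datetime) -> None:
            self._fire_time: datetime | None = fire_time

        def next(self) -> datetime | None:
            # Hand out the stored time once, then keep returning None.
            fire_time, self._fire_time = self._fire_time, None
            return fire_time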
diff --git a/src/apscheduler/converters.py b/src/apscheduler/converters.py
index 103ab35..7a2680d 100644
--- a/src/apscheduler/converters.py
+++ b/src/apscheduler/converters.py
@@ -2,13 +2,12 @@ from __future__ import annotations
from datetime import datetime, timedelta
from enum import Enum
-from typing import Optional
from uuid import UUID
from . import abc
-def as_aware_datetime(value: datetime | str) -> Optional[datetime]:
+def as_aware_datetime(value: datetime | str) -> datetime | None:
"""Convert the value from a string to a timezone aware datetime."""
if isinstance(value, str):
# fromisoformat() does not handle the "Z" suffix
diff --git a/src/apscheduler/datastores/async_adapter.py b/src/apscheduler/datastores/async_adapter.py
index 3537772..945851f 100644
--- a/src/apscheduler/datastores/async_adapter.py
+++ b/src/apscheduler/datastores/async_adapter.py
@@ -3,7 +3,7 @@ from __future__ import annotations
from contextlib import AsyncExitStack
from datetime import datetime
from functools import partial
-from typing import Iterable, Optional
+from typing import Iterable
from uuid import UUID
import attrs
@@ -58,7 +58,7 @@ class AsyncDataStoreAdapter(AsyncDataStore):
async def get_tasks(self) -> list[Task]:
return await to_thread.run_sync(self.original.get_tasks)
- async def get_schedules(self, ids: Optional[set[str]] = None) -> list[Schedule]:
+ async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
return await to_thread.run_sync(self.original.get_schedules, ids)
async def add_schedule(self, schedule: Schedule, conflict_policy: ConflictPolicy) -> None:
@@ -73,20 +73,20 @@ class AsyncDataStoreAdapter(AsyncDataStore):
async def release_schedules(self, scheduler_id: str, schedules: list[Schedule]) -> None:
await to_thread.run_sync(self.original.release_schedules, scheduler_id, schedules)
- async def get_next_schedule_run_time(self) -> Optional[datetime]:
+ async def get_next_schedule_run_time(self) -> datetime | None:
return await to_thread.run_sync(self.original.get_next_schedule_run_time)
async def add_job(self, job: Job) -> None:
await to_thread.run_sync(self.original.add_job, job)
- async def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> list[Job]:
+ async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
return await to_thread.run_sync(self.original.get_jobs, ids)
- async def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> list[Job]:
+ async def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
return await to_thread.run_sync(self.original.acquire_jobs, worker_id, limit)
async def release_job(self, worker_id: str, task_id: str, result: JobResult) -> None:
await to_thread.run_sync(self.original.release_job, worker_id, task_id, result)
- async def get_job_result(self, job_id: UUID) -> Optional[JobResult]:
+ async def get_job_result(self, job_id: UUID) -> JobResult | None:
return await to_thread.run_sync(self.original.get_job_result, job_id)
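Every method of the adapter above follows one recipe: forward the blocking call to a worker thread with anyio's to_thread.run_sync() so the async side never blocks the event loop. A condensed sketch of that pattern (hypothetical class, not from the diff):

    from __future__ import annotations

    import attrs
    from anyio import to_thread


    @attrs.define
    class AsyncAdapter:
        original: object  # any object exposing blocking methods

        async def get_tasks(self) -> list:
            # run_sync() executes the blocking call in a worker thread
            # and hands the result back to the awaiting coroutine.
            return await to_thread.run_sync(self.original.get_tasks)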
diff --git a/src/apscheduler/datastores/async_sqlalchemy.py b/src/apscheduler/datastores/async_sqlalchemy.py
index 4ad1483..ca97330 100644
--- a/src/apscheduler/datastores/async_sqlalchemy.py
+++ b/src/apscheduler/datastores/async_sqlalchemy.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from collections import defaultdict
from datetime import datetime, timedelta, timezone
-from typing import Any, Iterable, Optional
+from typing import Any, Iterable
from uuid import UUID
import anyio
@@ -216,7 +216,7 @@ class AsyncSQLAlchemyDataStore(_BaseSQLAlchemyDataStore, AsyncDataStore):
for schedule_id in removed_ids:
await self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
- async def get_schedules(self, ids: Optional[set[str]] = None) -> list[Schedule]:
+ async def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
query = self.t_schedules.select().order_by(self.t_schedules.c.id)
if ids:
query = query.where(self.t_schedules.c.id.in_(ids))
@@ -321,7 +321,7 @@ class AsyncSQLAlchemyDataStore(_BaseSQLAlchemyDataStore, AsyncDataStore):
for schedule_id in finished_schedule_ids:
await self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
- async def get_next_schedule_run_time(self) -> Optional[datetime]:
+ async def get_next_schedule_run_time(self) -> datetime | None:
statenent = select(self.t_schedules.c.next_fire_time).\
where(self.t_schedules.c.next_fire_time.isnot(None)).\
order_by(self.t_schedules.c.next_fire_time).\
@@ -344,7 +344,7 @@ class AsyncSQLAlchemyDataStore(_BaseSQLAlchemyDataStore, AsyncDataStore):
tags=job.tags)
await self._events.publish(event)
- async def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> list[Job]:
+ async def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
query = self.t_jobs.select().order_by(self.t_jobs.c.id)
if ids:
job_ids = [job_id for job_id in ids]
@@ -356,7 +356,7 @@ class AsyncSQLAlchemyDataStore(_BaseSQLAlchemyDataStore, AsyncDataStore):
result = await conn.execute(query)
return await self._deserialize_jobs(result)
- async def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> list[Job]:
+ async def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
async for attempt in self._retrying:
with attempt:
async with self.engine.begin() as conn:
@@ -444,7 +444,7 @@ class AsyncSQLAlchemyDataStore(_BaseSQLAlchemyDataStore, AsyncDataStore):
delete = self.t_jobs.delete().where(self.t_jobs.c.id == result.job_id)
await conn.execute(delete)
- async def get_job_result(self, job_id: UUID) -> Optional[JobResult]:
+ async def get_job_result(self, job_id: UUID) -> JobResult | None:
async for attempt in self._retrying:
with attempt:
async with self.engine.begin() as conn:
diff --git a/src/apscheduler/datastores/memory.py b/src/apscheduler/datastores/memory.py
index 6b8bc7b..65fad9e 100644
--- a/src/apscheduler/datastores/memory.py
+++ b/src/apscheduler/datastores/memory.py
@@ -4,7 +4,7 @@ from bisect import bisect_left, insort_right
from collections import defaultdict
from datetime import MAXYEAR, datetime, timedelta, timezone
from functools import partial
-from typing import Any, Iterable, Optional
+from typing import Any, Iterable
from uuid import UUID
import attrs
@@ -35,9 +35,9 @@ class TaskState:
@attrs.define
class ScheduleState:
schedule: Schedule
- next_fire_time: Optional[datetime] = attrs.field(init=False, eq=False)
- acquired_by: Optional[str] = attrs.field(init=False, eq=False, default=None)
- acquired_until: Optional[datetime] = attrs.field(init=False, eq=False, default=None)
+ next_fire_time: datetime | None = attrs.field(init=False, eq=False)
+ acquired_by: str | None = attrs.field(init=False, eq=False, default=None)
+ acquired_until: datetime | None = attrs.field(init=False, eq=False, default=None)
def __attrs_post_init__(self):
self.next_fire_time = self.schedule.next_fire_time
@@ -61,8 +61,8 @@ class ScheduleState:
class JobState:
job: Job = attrs.field(order=False)
created_at: datetime = attrs.field(init=False, factory=partial(datetime.now, timezone.utc))
- acquired_by: Optional[str] = attrs.field(eq=False, order=False, default=None)
- acquired_until: Optional[datetime] = attrs.field(eq=False, order=False, default=None)
+ acquired_by: str | None = attrs.field(eq=False, order=False, default=None)
+ acquired_until: datetime | None = attrs.field(eq=False, order=False, default=None)
def __eq__(self, other):
return self.job.id == other.job.id
@@ -85,12 +85,12 @@ class MemoryDataStore(DataStore):
_jobs_by_task_id: dict[str, set[JobState]] = attrs.Factory(partial(defaultdict, set))
_job_results: dict[UUID, JobResult] = attrs.Factory(dict)
- def _find_schedule_index(self, state: ScheduleState) -> Optional[int]:
+ def _find_schedule_index(self, state: ScheduleState) -> int | None:
left_index = bisect_left(self._schedules, state)
right_index = bisect_left(self._schedules, state)
return self._schedules.index(state, left_index, right_index + 1)
- def _find_job_index(self, state: JobState) -> Optional[int]:
+ def _find_job_index(self, state: JobState) -> int | None:
left_index = bisect_left(self._jobs, state)
right_index = bisect_left(self._jobs, state)
return self._jobs.index(state, left_index, right_index + 1)
@@ -106,7 +106,7 @@ class MemoryDataStore(DataStore):
def events(self) -> EventSource:
return self._events
- def get_schedules(self, ids: Optional[set[str]] = None) -> list[Schedule]:
+ def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
return [state.schedule for state in self._schedules
if ids is None or state.schedule.id in ids]
@@ -200,7 +200,7 @@ class MemoryDataStore(DataStore):
index = self._find_schedule_index(schedule_state)
del self._schedules[index]
- # Readd the schedule to its new position
+ # Re-add the schedule to its new position
schedule_state.next_fire_time = s.next_fire_time
schedule_state.acquired_by = None
schedule_state.acquired_until = None
@@ -213,7 +213,7 @@ class MemoryDataStore(DataStore):
# Remove schedules that didn't get a new next fire time
self.remove_schedules(finished_schedule_ids)
- def get_next_schedule_run_time(self) -> Optional[datetime]:
+ def get_next_schedule_run_time(self) -> datetime | None:
return self._schedules[0].next_fire_time if self._schedules else None
def add_job(self, job: Job) -> None:
@@ -226,16 +226,16 @@ class MemoryDataStore(DataStore):
tags=job.tags)
self._events.publish(event)
- def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> list[Job]:
+ def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
if ids is not None:
ids = frozenset(ids)
return [state.job for state in self._jobs if ids is None or state.job.id in ids]
- def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> list[Job]:
+ def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
now = datetime.now(timezone.utc)
jobs: list[Job] = []
- for index, job_state in enumerate(self._jobs):
+ for _index, job_state in enumerate(self._jobs):
task_state = self._tasks[job_state.job.task_id]
# Skip already acquired jobs (unless the acquisition lock has expired)
@@ -288,5 +288,5 @@ class MemoryDataStore(DataStore):
JobReleased(job_id=result.job_id, worker_id=worker_id, outcome=result.outcome)
)
- def get_job_result(self, job_id: UUID) -> Optional[JobResult]:
+ def get_job_result(self, job_id: UUID) -> JobResult | None:
return self._job_results.pop(job_id, None)
diff --git a/src/apscheduler/datastores/mongodb.py b/src/apscheduler/datastores/mongodb.py
index b8c4db5..7d6fa76 100644
--- a/src/apscheduler/datastores/mongodb.py
+++ b/src/apscheduler/datastores/mongodb.py
@@ -5,7 +5,7 @@ from collections import defaultdict
from contextlib import ExitStack
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
-from typing import Any, Callable, ClassVar, Iterable, Optional
+from typing import Any, Callable, ClassVar, Iterable
from uuid import UUID
import attrs
@@ -182,7 +182,7 @@ class MongoDBDataStore(DataStore):
return tasks
- def get_schedules(self, ids: Optional[set[str]] = None) -> list[Schedule]:
+ def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
filters = {'_id': {'$in': list(ids)}} if ids is not None else {}
for attempt in self._retrying:
with attempt:
@@ -311,7 +311,7 @@ class MongoDBDataStore(DataStore):
for schedule_id in finished_schedule_ids:
self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
- def get_next_schedule_run_time(self) -> Optional[datetime]:
+ def get_next_schedule_run_time(self) -> datetime | None:
for attempt in self._retrying:
with attempt:
document = self._schedules.find_one({'next_run_time': {'$ne': None}},
@@ -334,7 +334,7 @@ class MongoDBDataStore(DataStore):
tags=job.tags)
self._events.publish(event)
- def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> list[Job]:
+ def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
filters = {'_id': {'$in': list(ids)}} if ids is not None else {}
for attempt in self._retrying:
with attempt:
@@ -352,7 +352,7 @@ class MongoDBDataStore(DataStore):
return jobs
- def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> list[Job]:
+ def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
for attempt in self._retrying:
with attempt, self.client.start_session() as session:
cursor = self._jobs.find(
@@ -438,7 +438,7 @@ class MongoDBDataStore(DataStore):
JobReleased(job_id=result.job_id, worker_id=worker_id, outcome=result.outcome)
)
- def get_job_result(self, job_id: UUID) -> Optional[JobResult]:
+ def get_job_result(self, job_id: UUID) -> JobResult | None:
for attempt in self._retrying:
with attempt:
document = self._jobs_results.find_one_and_delete({'_id': job_id})
diff --git a/src/apscheduler/datastores/sqlalchemy.py b/src/apscheduler/datastores/sqlalchemy.py
index 86e8138..ee9fd34 100644
--- a/src/apscheduler/datastores/sqlalchemy.py
+++ b/src/apscheduler/datastores/sqlalchemy.py
@@ -3,7 +3,7 @@ from __future__ import annotations
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
-from typing import Any, Iterable, Optional
+from typing import Any, Iterable
from uuid import UUID
import attrs
@@ -66,10 +66,10 @@ class EmulatedInterval(TypeDecorator):
@attrs.define(kw_only=True, eq=False)
class _BaseSQLAlchemyDataStore:
- schema: Optional[str] = attrs.field(default=None)
+ schema: str | None = attrs.field(default=None)
serializer: Serializer = attrs.field(factory=PickleSerializer)
lock_expiration_delay: float = attrs.field(default=30)
- max_poll_time: Optional[float] = attrs.field(default=1)
+ max_poll_time: float | None = attrs.field(default=1)
max_idle_time: float = attrs.field(default=60)
retry_settings: RetrySettings = attrs.field(default=RetrySettings())
start_from_scratch: bool = attrs.field(default=False)
@@ -342,7 +342,7 @@ class SQLAlchemyDataStore(_BaseSQLAlchemyDataStore, DataStore):
for schedule_id in removed_ids:
self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
- def get_schedules(self, ids: Optional[set[str]] = None) -> list[Schedule]:
+ def get_schedules(self, ids: set[str] | None = None) -> list[Schedule]:
query = self.t_schedules.select().order_by(self.t_schedules.c.id)
if ids:
query = query.where(self.t_schedules.c.id.in_(ids))
@@ -441,7 +441,7 @@ class SQLAlchemyDataStore(_BaseSQLAlchemyDataStore, DataStore):
for schedule_id in finished_schedule_ids:
self._events.publish(ScheduleRemoved(schedule_id=schedule_id))
- def get_next_schedule_run_time(self) -> Optional[datetime]:
+ def get_next_schedule_run_time(self) -> datetime | None:
query = select(self.t_schedules.c.next_fire_time).\
where(self.t_schedules.c.next_fire_time.isnot(None)).\
order_by(self.t_schedules.c.next_fire_time).\
@@ -462,7 +462,7 @@ class SQLAlchemyDataStore(_BaseSQLAlchemyDataStore, DataStore):
tags=job.tags)
self._events.publish(event)
- def get_jobs(self, ids: Optional[Iterable[UUID]] = None) -> list[Job]:
+ def get_jobs(self, ids: Iterable[UUID] | None = None) -> list[Job]:
query = self.t_jobs.select().order_by(self.t_jobs.c.id)
if ids:
job_ids = [job_id for job_id in ids]
@@ -473,7 +473,7 @@ class SQLAlchemyDataStore(_BaseSQLAlchemyDataStore, DataStore):
result = conn.execute(query)
return self._deserialize_jobs(result)
- def acquire_jobs(self, worker_id: str, limit: Optional[int] = None) -> list[Job]:
+ def acquire_jobs(self, worker_id: str, limit: int | None = None) -> list[Job]:
for attempt in self._retrying:
with attempt, self.engine.begin() as conn:
now = datetime.now(timezone.utc)
@@ -563,7 +563,7 @@ class SQLAlchemyDataStore(_BaseSQLAlchemyDataStore, DataStore):
JobReleased(job_id=result.job_id, worker_id=worker_id, outcome=result.outcome)
)
- def get_job_result(self, job_id: UUID) -> Optional[JobResult]:
+ def get_job_result(self, job_id: UUID) -> JobResult | None:
for attempt in self._retrying:
with attempt, self.engine.begin() as conn:
# Retrieve the result
diff --git a/src/apscheduler/enums.py b/src/apscheduler/enums.py
index 5a0929c..dec6669 100644
--- a/src/apscheduler/enums.py
+++ b/src/apscheduler/enums.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from enum import Enum, auto
diff --git a/src/apscheduler/eventbrokers/async_local.py b/src/apscheduler/eventbrokers/async_local.py
index cb0fb96..753505c 100644
--- a/src/apscheduler/eventbrokers/async_local.py
+++ b/src/apscheduler/eventbrokers/async_local.py
@@ -38,7 +38,7 @@ class LocalAsyncEventBroker(AsyncEventBroker, BaseEventBroker):
async def publish_local(self, event: Event) -> None:
event_type = type(event)
one_shot_tokens: list[object] = []
- for token, subscription in self._subscriptions.items():
+ for _token, subscription in self._subscriptions.items():
if subscription.event_types is None or event_type in subscription.event_types:
self._task_group.start_soon(self._deliver_event, subscription.callback, event)
if subscription.one_shot:
diff --git a/src/apscheduler/eventbrokers/base.py b/src/apscheduler/eventbrokers/base.py
index 8ec4dcb..c917d2f 100644
--- a/src/apscheduler/eventbrokers/base.py
+++ b/src/apscheduler/eventbrokers/base.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from base64 import b64decode, b64encode
from logging import Logger, getLogger
-from typing import Any, Callable, Iterable, Optional
+from typing import Any, Callable, Iterable
import attrs
@@ -15,7 +15,7 @@ from ..exceptions import DeserializationError
@attrs.define(eq=False, frozen=True)
class LocalSubscription(Subscription):
callback: Callable[[Event], Any]
- event_types: Optional[set[type[Event]]]
+ event_types: set[type[Event]] | None
one_shot: bool
token: object
_source: BaseEventBroker
@@ -33,7 +33,7 @@ class BaseEventBroker(EventBroker):
self._logger = getLogger(self.__class__.__module__)
def subscribe(self, callback: Callable[[Event], Any],
- event_types: Optional[Iterable[type[Event]]] = None, *,
+ event_types: Iterable[type[Event]] | None = None, *,
one_shot: bool = False) -> Subscription:
types = set(event_types) if event_types else None
token = object()
@@ -57,7 +57,7 @@ class DistributedEventBrokerMixin:
serialized = self.serializer.serialize(attrs.asdict(event))
return event.__class__.__name__ + ' ' + b64encode(serialized).decode('ascii')
- def _reconstitute_event(self, event_type: str, serialized: bytes) -> Optional[Event]:
+ def _reconstitute_event(self, event_type: str, serialized: bytes) -> Event | None:
try:
kwargs = self.serializer.deserialize(serialized)
except DeserializationError:
@@ -78,7 +78,7 @@ class DistributedEventBrokerMixin:
self._logger.exception('Error reconstituting event of type %s', event_type)
return None
- def reconstitute_event(self, payload: bytes) -> Optional[Event]:
+ def reconstitute_event(self, payload: bytes) -> Event | None:
try:
event_type_bytes, serialized = payload.split(b' ', 1)
except ValueError:
@@ -88,7 +88,7 @@ class DistributedEventBrokerMixin:
event_type = event_type_bytes.decode('ascii', errors='replace')
return self._reconstitute_event(event_type, serialized)
- def reconstitute_event_str(self, payload: str) -> Optional[Event]:
+ def reconstitute_event_str(self, payload: str) -> Event | None:
try:
event_type, b64_serialized = payload.split(' ', 1)
except ValueError:
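The mixin above frames every notification as the event class name, a space, and the base64-encoded serialized payload; reconstitute_event_str() reverses the framing by splitting on the first space. A standalone sketch of that round trip using only the standard library (illustrative helper names, with pickle standing in for the package's serializer):

    from __future__ import annotations

    from base64 import b64decode, b64encode
    from pickle import dumps, loads


    def generate_notification(event_type: str, payload: dict) -> str:
        # "<ClassName> <base64 payload>" -- splittable on the first space
        return event_type + ' ' + b64encode(dumps(payload)).decode('ascii')


    def reconstitute(notification: str) -> tuple[str, dict]:
        event_type, b64_serialized = notification.split(' ', 1)
        return event_type, loads(b64decode(b64_serialized))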
diff --git a/src/apscheduler/eventbrokers/local.py b/src/apscheduler/eventbrokers/local.py
index fd345a1..f780aae 100644
--- a/src/apscheduler/eventbrokers/local.py
+++ b/src/apscheduler/eventbrokers/local.py
@@ -4,7 +4,7 @@ from asyncio import iscoroutinefunction
from concurrent.futures import ThreadPoolExecutor
from contextlib import ExitStack
from threading import Lock
-from typing import Any, Callable, Iterable, Optional
+from typing import Any, Callable, Iterable
import attrs
@@ -31,7 +31,7 @@ class LocalEventBroker(BaseEventBroker):
del self._executor
def subscribe(self, callback: Callable[[Event], Any],
- event_types: Optional[Iterable[type[Event]]] = None, *,
+ event_types: Iterable[type[Event]] | None = None, *,
one_shot: bool = False) -> Subscription:
if iscoroutinefunction(callback):
raise ValueError('Coroutine functions are not supported as callbacks on a synchronous '
@@ -51,7 +51,7 @@ class LocalEventBroker(BaseEventBroker):
event_type = type(event)
with self._subscriptions_lock:
one_shot_tokens: list[object] = []
- for token, subscription in self._subscriptions.items():
+ for _token, subscription in self._subscriptions.items():
if subscription.event_types is None or event_type in subscription.event_types:
self._executor.submit(self._deliver_event, subscription.callback, event)
if subscription.one_shot:
diff --git a/src/apscheduler/eventbrokers/mqtt.py b/src/apscheduler/eventbrokers/mqtt.py
index 60e7195..04db95d 100644
--- a/src/apscheduler/eventbrokers/mqtt.py
+++ b/src/apscheduler/eventbrokers/mqtt.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from concurrent.futures import Future
-from typing import Any, Optional
+from typing import Any
import attrs
from paho.mqtt.client import Client, MQTTMessage
@@ -43,7 +43,7 @@ class MQTTEventBroker(LocalEventBroker, DistributedEventBrokerMixin):
return self
def _on_connect(self, client: Client, userdata: Any, flags: dict[str, Any],
- rc: ReasonCodes | int, properties: Optional[Properties] = None) -> None:
+ rc: ReasonCodes | int, properties: Properties | None = None) -> None:
try:
client.subscribe(self.topic, qos=self.subscribe_qos)
except Exception as exc:
diff --git a/src/apscheduler/events.py b/src/apscheduler/events.py
index 9eb1c7a..f38c12e 100644
--- a/src/apscheduler/events.py
+++ b/src/apscheduler/events.py
@@ -2,7 +2,6 @@ from __future__ import annotations
from datetime import datetime, timezone
from functools import partial
-from typing import Optional
from uuid import UUID
import attrs
@@ -45,13 +44,13 @@ class TaskRemoved(DataStoreEvent):
@attrs.define(kw_only=True, frozen=True)
class ScheduleAdded(DataStoreEvent):
schedule_id: str
- next_fire_time: Optional[datetime] = attrs.field(converter=optional(as_aware_datetime))
+ next_fire_time: datetime | None = attrs.field(converter=optional(as_aware_datetime))
@attrs.define(kw_only=True, frozen=True)
class ScheduleUpdated(DataStoreEvent):
schedule_id: str
- next_fire_time: Optional[datetime] = attrs.field(converter=optional(as_aware_datetime))
+ next_fire_time: datetime | None = attrs.field(converter=optional(as_aware_datetime))
@attrs.define(kw_only=True, frozen=True)
@@ -63,7 +62,7 @@ class ScheduleRemoved(DataStoreEvent):
class JobAdded(DataStoreEvent):
job_id: UUID = attrs.field(converter=as_uuid)
task_id: str
- schedule_id: Optional[str]
+ schedule_id: str | None
tags: frozenset[str] = attrs.field(converter=frozenset)
@@ -100,7 +99,7 @@ class SchedulerStarted(SchedulerEvent):
@attrs.define(kw_only=True, frozen=True)
class SchedulerStopped(SchedulerEvent):
- exception: Optional[BaseException] = None
+ exception: BaseException | None = None
#
@@ -119,7 +118,7 @@ class WorkerStarted(WorkerEvent):
@attrs.define(kw_only=True, frozen=True)
class WorkerStopped(WorkerEvent):
- exception: Optional[BaseException] = None
+ exception: BaseException | None = None
@attrs.define(kw_only=True, frozen=True)
diff --git a/src/apscheduler/exceptions.py b/src/apscheduler/exceptions.py
index b84e58a..64fbf6a 100644
--- a/src/apscheduler/exceptions.py
+++ b/src/apscheduler/exceptions.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from uuid import UUID
diff --git a/src/apscheduler/schedulers/async_.py b/src/apscheduler/schedulers/async_.py
index 61e2e64..6d3287f 100644
--- a/src/apscheduler/schedulers/async_.py
+++ b/src/apscheduler/schedulers/async_.py
@@ -6,7 +6,7 @@ import random
from contextlib import AsyncExitStack
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
-from typing import Any, Callable, Iterable, Mapping, Optional
+from typing import Any, Callable, Iterable, Mapping
from uuid import UUID, uuid4
import anyio
@@ -37,11 +37,11 @@ class AsyncScheduler:
data_store: AsyncDataStore = attrs.field(converter=as_async_datastore, factory=MemoryDataStore)
identity: str = attrs.field(kw_only=True, default=None)
start_worker: bool = attrs.field(kw_only=True, default=True)
- logger: Optional[Logger] = attrs.field(kw_only=True, default=getLogger(__name__))
+ logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_wakeup_event: anyio.Event = attrs.field(init=False)
- _worker: Optional[AsyncWorker] = attrs.field(init=False, default=None)
+ _worker: AsyncWorker | None = attrs.field(init=False, default=None)
_events: LocalAsyncEventBroker = attrs.field(init=False, factory=LocalAsyncEventBroker)
_exit_stack: AsyncExitStack = attrs.field(init=False)
@@ -54,7 +54,7 @@ class AsyncScheduler:
return self._events
@property
- def worker(self) -> Optional[AsyncWorker]:
+ def worker(self) -> AsyncWorker | None:
return self._worker
async def __aenter__(self):
@@ -102,11 +102,11 @@ class AsyncScheduler:
self._wakeup_event.set()
async def add_schedule(
- self, func_or_task_id: str | Callable, trigger: Trigger, *, id: Optional[str] = None,
- args: Optional[Iterable] = None, kwargs: Optional[Mapping[str, Any]] = None,
+ self, func_or_task_id: str | Callable, trigger: Trigger, *, id: str | None = None,
+ args: Iterable | None = None, kwargs: Mapping[str, Any] | None = None,
coalesce: CoalescePolicy = CoalescePolicy.latest,
misfire_grace_time: float | timedelta | None = None,
- max_jitter: float | timedelta | None = None, tags: Optional[Iterable[str]] = None,
+ max_jitter: float | timedelta | None = None, tags: Iterable[str] | None = None,
conflict_policy: ConflictPolicy = ConflictPolicy.do_nothing
) -> str:
id = id or str(uuid4())
@@ -139,8 +139,8 @@ class AsyncScheduler:
await self.data_store.remove_schedules({schedule_id})
async def add_job(
- self, func_or_task_id: str | Callable, *, args: Optional[Iterable] = None,
- kwargs: Optional[Mapping[str, Any]] = None, tags: Optional[Iterable[str]] = None
+ self, func_or_task_id: str | Callable, *, args: Iterable | None = None,
+ kwargs: Mapping[str, Any] | None = None, tags: Iterable[str] | None = None
) -> UUID:
"""
Add a job to the data store.
@@ -192,8 +192,8 @@ class AsyncScheduler:
return result
async def run_job(
- self, func_or_task_id: str | Callable, *, args: Optional[Iterable] = None,
- kwargs: Optional[Mapping[str, Any]] = None, tags: Optional[Iterable[str]] = ()
+ self, func_or_task_id: str | Callable, *, args: Iterable | None = None,
+ kwargs: Mapping[str, Any] | None = None, tags: Iterable[str] | None = ()
) -> Any:
"""
Convenience method to add a job and then return its result (or raise its exception).
@@ -207,7 +207,7 @@ class AsyncScheduler:
if event.job_id == job_id:
job_complete_event.set()
- job_id: Optional[UUID] = None
+ job_id: UUID | None = None
with self.data_store.events.subscribe(listener, {JobReleased}):
job_id = await self.add_job(func_or_task_id, args=args, kwargs=kwargs, tags=tags)
await job_complete_event.wait()
@@ -234,7 +234,7 @@ class AsyncScheduler:
task_status.started()
await self._events.publish(SchedulerStarted())
- exception: Optional[BaseException] = None
+ exception: BaseException | None = None
try:
while self._state is RunState.started:
schedules = await self.data_store.acquire_schedules(self.identity, 100)
diff --git a/src/apscheduler/schedulers/sync.py b/src/apscheduler/schedulers/sync.py
index 22270f0..7ead9f8 100644
--- a/src/apscheduler/schedulers/sync.py
+++ b/src/apscheduler/schedulers/sync.py
@@ -8,7 +8,7 @@ from concurrent.futures import FIRST_COMPLETED, Future, ThreadPoolExecutor, wait
from contextlib import ExitStack
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
-from typing import Any, Callable, Iterable, Mapping, Optional
+from typing import Any, Callable, Iterable, Mapping
from uuid import UUID, uuid4
import attrs
@@ -36,11 +36,11 @@ class Scheduler:
data_store: DataStore = attrs.field(factory=MemoryDataStore)
identity: str = attrs.field(kw_only=True, default=None)
start_worker: bool = attrs.field(kw_only=True, default=True)
- logger: Optional[Logger] = attrs.field(kw_only=True, default=getLogger(__name__))
+ logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_wakeup_event: threading.Event = attrs.field(init=False)
- _worker: Optional[Worker] = attrs.field(init=False, default=None)
+ _worker: Worker | None = attrs.field(init=False, default=None)
_events: LocalEventBroker = attrs.field(init=False, factory=LocalEventBroker)
_exit_stack: ExitStack = attrs.field(init=False)
@@ -57,7 +57,7 @@ class Scheduler:
return self._state
@property
- def worker(self) -> Optional[Worker]:
+ def worker(self) -> Worker | None:
return self._worker
def __enter__(self) -> Scheduler:
@@ -112,11 +112,11 @@ class Scheduler:
self._wakeup_event.set()
def add_schedule(
- self, func_or_task_id: str | Callable, trigger: Trigger, *, id: Optional[str] = None,
- args: Optional[Iterable] = None, kwargs: Optional[Mapping[str, Any]] = None,
+ self, func_or_task_id: str | Callable, trigger: Trigger, *, id: str | None = None,
+ args: Iterable | None = None, kwargs: Mapping[str, Any] | None = None,
coalesce: CoalescePolicy = CoalescePolicy.latest,
misfire_grace_time: float | timedelta | None = None,
- max_jitter: float | timedelta | None = None, tags: Optional[Iterable[str]] = None,
+ max_jitter: float | timedelta | None = None, tags: Iterable[str] | None = None,
conflict_policy: ConflictPolicy = ConflictPolicy.do_nothing
) -> str:
id = id or str(uuid4())
@@ -149,8 +149,8 @@ class Scheduler:
self.data_store.remove_schedules({schedule_id})
def add_job(
- self, func_or_task_id: str | Callable, *, args: Optional[Iterable] = None,
- kwargs: Optional[Mapping[str, Any]] = None, tags: Optional[Iterable[str]] = None
+ self, func_or_task_id: str | Callable, *, args: Iterable | None = None,
+ kwargs: Mapping[str, Any] | None = None, tags: Iterable[str] | None = None
) -> UUID:
"""
Add a job to the data store.
@@ -202,8 +202,8 @@ class Scheduler:
return result
def run_job(
- self, func_or_task_id: str | Callable, *, args: Optional[Iterable] = None,
- kwargs: Optional[Mapping[str, Any]] = None, tags: Optional[Iterable[str]] = ()
+ self, func_or_task_id: str | Callable, *, args: Iterable | None = None,
+ kwargs: Mapping[str, Any] | None = None, tags: Iterable[str] | None = ()
) -> Any:
"""
Convenience method to add a job and then return its result (or raise its exception).
@@ -217,7 +217,7 @@ class Scheduler:
if event.job_id == job_id:
job_complete_event.set()
- job_id: Optional[UUID] = None
+ job_id: UUID | None = None
with self.data_store.events.subscribe(listener, {JobReleased}):
job_id = self.add_job(func_or_task_id, args=args, kwargs=kwargs, tags=tags)
job_complete_event.wait()
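Both schedulers implement run_job() with the same pattern visible in the hunks above: subscribe a listener for JobReleased events, submit the job, block until the listener sees the matching job ID, then fetch the result. A simplified threading sketch of that wait (hypothetical helper; the real method also surfaces the job's exception):

    from __future__ import annotations

    import threading

    from apscheduler.events import JobReleased


    def run_job_and_wait(scheduler, func):
        job_complete_event = threading.Event()
        job_id = None

        def listener(event) -> None:
            if event.job_id == job_id:  # react only to our own job
                job_complete_event.set()

        # The subscription is scoped to the wait; exiting unsubscribes.
        with scheduler.data_store.events.subscribe(listener, {JobReleased}):
            job_id = scheduler.add_job(func)
            job_complete_event.wait()

        return scheduler.data_store.get_job_result(job_id)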
diff --git a/src/apscheduler/serializers/pickle.py b/src/apscheduler/serializers/pickle.py
index d03bdc0..5edf2d0 100644
--- a/src/apscheduler/serializers/pickle.py
+++ b/src/apscheduler/serializers/pickle.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from pickle import dumps, loads
import attrs
diff --git a/src/apscheduler/structures.py b/src/apscheduler/structures.py
index 7f6457d..dd8204c 100644
--- a/src/apscheduler/structures.py
+++ b/src/apscheduler/structures.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from datetime import datetime, timedelta, timezone
from functools import partial
-from typing import Any, Callable, Optional
+from typing import Any, Callable
from uuid import UUID, uuid4
import attrs
@@ -27,8 +27,8 @@ def serialize(inst, field, value):
class Task:
id: str
func: Callable = attrs.field(eq=False, order=False)
- max_running_jobs: Optional[int] = attrs.field(eq=False, order=False, default=None)
- misfire_grace_time: Optional[timedelta] = attrs.field(eq=False, order=False, default=None)
+ max_running_jobs: int | None = attrs.field(eq=False, order=False, default=None)
+ misfire_grace_time: timedelta | None = attrs.field(eq=False, order=False, default=None)
state: Any = None
def marshal(self, serializer: abc.Serializer) -> dict[str, Any]:
@@ -55,15 +55,15 @@ class Schedule:
kwargs: dict[str, Any] = attrs.field(eq=False, order=False, converter=dict, default=())
coalesce: CoalescePolicy = attrs.field(eq=False, order=False, default=CoalescePolicy.latest,
converter=as_enum(CoalescePolicy))
- misfire_grace_time: Optional[timedelta] = attrs.field(eq=False, order=False, default=None,
- converter=as_timedelta)
- max_jitter: Optional[timedelta] = attrs.field(eq=False, order=False, converter=as_timedelta,
- default=None)
+ misfire_grace_time: timedelta | None = attrs.field(eq=False, order=False, default=None,
+ converter=as_timedelta)
+ max_jitter: timedelta | None = attrs.field(eq=False, order=False, converter=as_timedelta,
+ default=None)
tags: frozenset[str] = attrs.field(eq=False, order=False, converter=frozenset, default=())
- next_fire_time: Optional[datetime] = attrs.field(eq=False, order=False, default=None)
- last_fire_time: Optional[datetime] = attrs.field(eq=False, order=False, default=None)
- acquired_by: Optional[str] = attrs.field(eq=False, order=False, default=None)
- acquired_until: Optional[datetime] = attrs.field(eq=False, order=False, default=None)
+ next_fire_time: datetime | None = attrs.field(eq=False, order=False, default=None)
+ last_fire_time: datetime | None = attrs.field(eq=False, order=False, default=None)
+ acquired_by: str | None = attrs.field(eq=False, order=False, default=None)
+ acquired_until: datetime | None = attrs.field(eq=False, order=False, default=None)
def marshal(self, serializer: abc.Serializer) -> dict[str, Any]:
marshalled = attrs.asdict(self, value_serializer=serialize)
@@ -84,7 +84,7 @@ class Schedule:
return cls(**marshalled)
@property
- def next_deadline(self) -> Optional[datetime]:
+ def next_deadline(self) -> datetime | None:
if self.next_fire_time and self.misfire_grace_time:
return self.next_fire_time + self.misfire_grace_time
@@ -97,20 +97,20 @@ class Job:
task_id: str = attrs.field(eq=False, order=False)
args: tuple = attrs.field(eq=False, order=False, converter=tuple, default=())
kwargs: dict[str, Any] = attrs.field(eq=False, order=False, converter=dict, default=())
- schedule_id: Optional[str] = attrs.field(eq=False, order=False, default=None)
- scheduled_fire_time: Optional[datetime] = attrs.field(eq=False, order=False, default=None)
+ schedule_id: str | None = attrs.field(eq=False, order=False, default=None)
+ scheduled_fire_time: datetime | None = attrs.field(eq=False, order=False, default=None)
jitter: timedelta = attrs.field(eq=False, order=False, converter=as_timedelta,
factory=timedelta)
- start_deadline: Optional[datetime] = attrs.field(eq=False, order=False, default=None)
+ start_deadline: datetime | None = attrs.field(eq=False, order=False, default=None)
tags: frozenset[str] = attrs.field(eq=False, order=False, converter=frozenset, default=())
created_at: datetime = attrs.field(eq=False, order=False,
factory=partial(datetime.now, timezone.utc))
- started_at: Optional[datetime] = attrs.field(eq=False, order=False, default=None)
- acquired_by: Optional[str] = attrs.field(eq=False, order=False, default=None)
- acquired_until: Optional[datetime] = attrs.field(eq=False, order=False, default=None)
+ started_at: datetime | None = attrs.field(eq=False, order=False, default=None)
+ acquired_by: str | None = attrs.field(eq=False, order=False, default=None)
+ acquired_until: datetime | None = attrs.field(eq=False, order=False, default=None)
@property
- def original_scheduled_time(self) -> Optional[datetime]:
+ def original_scheduled_time(self) -> datetime | None:
"""The scheduled time without any jitter included."""
if self.scheduled_fire_time is None:
return None
@@ -138,10 +138,10 @@ class Job:
class JobInfo:
job_id: UUID
task_id: str
- schedule_id: Optional[str]
- scheduled_fire_time: Optional[datetime]
+ schedule_id: str | None
+ scheduled_fire_time: datetime | None
jitter: timedelta
- start_deadline: Optional[datetime]
+ start_deadline: datetime | None
tags: frozenset[str]
@classmethod
@@ -157,7 +157,7 @@ class JobResult:
outcome: JobOutcome = attrs.field(eq=False, order=False, converter=as_enum(JobOutcome))
finished_at: datetime = attrs.field(eq=False, order=False,
factory=partial(datetime.now, timezone.utc))
- exception: Optional[BaseException] = attrs.field(eq=False, order=False, default=None)
+ exception: BaseException | None = attrs.field(eq=False, order=False, default=None)
return_value: Any = attrs.field(eq=False, order=False, default=None)
def marshal(self, serializer: abc.Serializer) -> dict[str, Any]:
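The structures.py hunks above are the core of the syntax upgrade: every Optional[X] annotation becomes the PEP 604 spelling X | None. A minimal standalone sketch of the resulting pattern (the class and field names here are illustrative, not the module's own):

    from __future__ import annotations

    from datetime import datetime, timedelta

    import attrs

    @attrs.define
    class Example:
        # `timedelta | None` parses on Python 3.7-3.9 only because the
        # __future__ import postpones annotation evaluation (PEP 563),
        # turning every annotation into a plain string.
        misfire_grace_time: timedelta | None = attrs.field(
            eq=False, order=False, default=None)
        next_fire_time: datetime | None = attrs.field(
            eq=False, order=False, default=None)

    print(Example())  # Example(misfire_grace_time=None, next_fire_time=None)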
diff --git a/src/apscheduler/triggers/calendarinterval.py b/src/apscheduler/triggers/calendarinterval.py
index 8d0990d..fbb5896 100644
--- a/src/apscheduler/triggers/calendarinterval.py
+++ b/src/apscheduler/triggers/calendarinterval.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from datetime import date, datetime, time, timedelta, tzinfo
-from typing import Any, Optional
+from typing import Any
import attrs
@@ -67,7 +67,7 @@ class CalendarIntervalTrigger(Trigger):
end_date: date | None = attrs.field(converter=as_date, default=None)
timezone: tzinfo = attrs.field(converter=as_timezone, default='local')
_time: time = attrs.field(init=False, eq=False)
- _last_fire_date: Optional[date] = attrs.field(init=False, eq=False, default=None)
+ _last_fire_date: date | None = attrs.field(init=False, eq=False, default=None)
def __attrs_post_init__(self) -> None:
self._time = time(self.hour, self.minute, self.second, tzinfo=self.timezone)
@@ -78,7 +78,7 @@ class CalendarIntervalTrigger(Trigger):
if self.start_date and self.end_date and self.start_date > self.end_date:
raise ValueError('end_date cannot be earlier than start_date')
- def next(self) -> Optional[datetime]:
+ def next(self) -> datetime | None:
previous_date: date = self._last_fire_date
while True:
if previous_date:
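One caveat worth keeping in mind with this style of upgrade: the | union is only safe here because it appears in annotation position, which PEP 563 reduces to an unevaluated string. Built at runtime, date | None still raises TypeError before Python 3.10, so isinstance() checks cannot use it. A small illustration:

    from __future__ import annotations

    from datetime import date

    def describe(value: date | None) -> str:  # fine: never evaluated
        # isinstance(value, date | None) would raise TypeError on 3.7-3.9;
        # runtime checks must keep using a class or a tuple of classes.
        return value.isoformat() if isinstance(value, date) else 'never'

    print(describe(date(2022, 1, 2)))  # 2022-01-02
    print(describe(None))              # never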
diff --git a/src/apscheduler/triggers/combining.py b/src/apscheduler/triggers/combining.py
index fa0b5c9..bcee64a 100644
--- a/src/apscheduler/triggers/combining.py
+++ b/src/apscheduler/triggers/combining.py
@@ -2,7 +2,7 @@ from __future__ import annotations
from abc import abstractmethod
from datetime import datetime, timedelta
-from typing import Any, Optional
+from typing import Any
import attrs
@@ -15,7 +15,7 @@ from ..validators import as_timedelta, require_state_version
@attrs.define
class BaseCombiningTrigger(Trigger):
triggers: list[Trigger]
- _next_fire_times: list[Optional[datetime]] = attrs.field(init=False, eq=False, factory=list)
+ _next_fire_times: list[datetime | None] = attrs.field(init=False, eq=False, factory=list)
def __getstate__(self) -> dict[str, Any]:
return {
@@ -51,17 +51,17 @@ class AndTrigger(BaseCombiningTrigger):
"""
threshold: timedelta = attrs.field(converter=as_timedelta, default=1)
- max_iterations: Optional[int] = 10000
+ max_iterations: int | None = 10000
- def next(self) -> Optional[datetime]:
+ def next(self) -> datetime | None:
if not self._next_fire_times:
# Fill out the fire times on the first run
self._next_fire_times = [t.next() for t in self.triggers]
for _ in range(self.max_iterations):
# Find the earliest and latest fire times
- earliest_fire_time: Optional[datetime] = None
- latest_fire_time: Optional[datetime] = None
+ earliest_fire_time: datetime | None = None
+ latest_fire_time: datetime | None = None
for fire_time in self._next_fire_times:
# If any of the fire times is None, this trigger is finished
if fire_time is None:
@@ -74,7 +74,7 @@ class AndTrigger(BaseCombiningTrigger):
latest_fire_time = fire_time
# Replace all the fire times that were within the threshold
- for i, trigger in enumerate(self.triggers):
+ for i, _trigger in enumerate(self.triggers):
if self._next_fire_times[i] - earliest_fire_time <= self.threshold:
self._next_fire_times[i] = self.triggers[i].next()
@@ -114,14 +114,14 @@ class OrTrigger(BaseCombiningTrigger):
:param triggers: triggers to combine
"""
- def next(self) -> Optional[datetime]:
+ def next(self) -> datetime | None:
# Fill out the fire times on the first run
if not self._next_fire_times:
self._next_fire_times = [t.next() for t in self.triggers]
# Find out the earliest of the fire times
- earliest_time: Optional[datetime] = min([fire_time for fire_time in self._next_fire_times
- if fire_time is not None], default=None)
+ earliest_time: datetime | None = min((fire_time for fire_time in self._next_fire_times
+ if fire_time is not None), default=None)
if earliest_time is not None:
# Generate new fire times for the trigger(s) that generated the earliest fire time
for i, fire_time in enumerate(self._next_fire_times):
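Two smaller cleanups ride along in combining.py: the unused loop variable becomes _trigger so linters stop flagging it, and the list comprehension inside min() becomes a generator expression, avoiding a throwaway list. The default=None argument is what keeps min() from raising ValueError once every fire time has been filtered out:

    from datetime import datetime, timedelta

    now = datetime(2022, 1, 2, 15, 25)
    fire_times = [now + timedelta(seconds=5), None, now + timedelta(seconds=1)]

    # The generator feeds min() lazily; default=None covers the all-None case.
    earliest = min((t for t in fire_times if t is not None), default=None)
    print(earliest)  # 2022-01-02 15:25:01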
diff --git a/src/apscheduler/triggers/cron/__init__.py b/src/apscheduler/triggers/cron/__init__.py
index 1c1e582..df1f9e5 100644
--- a/src/apscheduler/triggers/cron/__init__.py
+++ b/src/apscheduler/triggers/cron/__init__.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from datetime import datetime, timedelta, tzinfo
-from typing import Any, ClassVar, Optional, Sequence
+from typing import Any, ClassVar, Sequence
import attrs
from tzlocal import get_localzone
@@ -59,12 +59,12 @@ class CronTrigger(Trigger):
end_time: datetime | None = None
timezone: tzinfo | str = attrs.field(converter=as_timezone, factory=get_localzone)
_fields: list[BaseField] = attrs.field(init=False, eq=False, factory=list)
- _last_fire_time: Optional[datetime] = attrs.field(init=False, eq=False, default=None)
+ _last_fire_time: datetime | None = attrs.field(init=False, eq=False, default=None)
def __attrs_post_init__(self) -> None:
self._set_fields([self.year, self.month, self.day, self.week, self.day_of_week, self.hour,
self.minute, self.second])
- self._last_fire_time: Optional[datetime] = None
+ self._last_fire_time: datetime | None = None
def _set_fields(self, values: Sequence[int | str | None]) -> None:
self._fields = []
@@ -80,7 +80,7 @@ class CronTrigger(Trigger):
self._fields.append(field)
@classmethod
- def from_crontab(cls, expr: str, timezone: str | tzinfo = 'local') -> 'CronTrigger':
+ def from_crontab(cls, expr: str, timezone: str | tzinfo = 'local') -> CronTrigger:
"""
Create a :class:`~CronTrigger` from a standard crontab expression.
@@ -153,7 +153,7 @@ class CronTrigger(Trigger):
return datetime(**values, tzinfo=self.timezone)
- def next(self) -> Optional[datetime]:
+ def next(self) -> datetime | None:
if self._last_fire_time:
start_time = self._last_fire_time + timedelta(microseconds=1)
else:
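The from_crontab() change drops the quotes around the return annotation: with postponed evaluation every annotation is already a forward reference, so a class can name itself without string quoting. A sketch with made-up names:

    from __future__ import annotations

    class Trigger:
        @classmethod
        def from_expr(cls, expr: str) -> Trigger:
            # No quotes needed: the annotation is stored as the string
            # "Trigger" and never evaluated at class-definition time.
            return cls()

    print(type(Trigger.from_expr('* * * * *')).__name__)  # Trigger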
diff --git a/src/apscheduler/triggers/cron/expressions.py b/src/apscheduler/triggers/cron/expressions.py
index e417dea..5c94afa 100644
--- a/src/apscheduler/triggers/cron/expressions.py
+++ b/src/apscheduler/triggers/cron/expressions.py
@@ -4,7 +4,6 @@ from __future__ import annotations
import re
from calendar import monthrange
from datetime import datetime
-from typing import Optional
from ...validators import as_int
@@ -35,7 +34,7 @@ class AllExpression:
raise ValueError(f'the step value ({self.step}) is higher than the total range of the '
f'expression ({value_range})')
- def get_next_value(self, dateval: datetime, field) -> Optional[int]:
+ def get_next_value(self, dateval: datetime, field) -> int | None:
start = field.get_value(dateval)
minval = field.get_min(dateval)
maxval = field.get_max(dateval)
@@ -144,7 +143,7 @@ class WeekdayRangeExpression(RangeExpression):
value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE)
- def __init__(self, first: str, last: Optional[str] = None):
+ def __init__(self, first: str, last: str | None = None):
first_num = get_weekday_index(first)
last_num = get_weekday_index(last) if last else None
super().__init__(first_num, last_num)
@@ -171,7 +170,7 @@ class WeekdayPositionExpression(AllExpression):
except ValueError:
raise ValueError(f'Invalid weekday name {weekday_name!r}') from None
- def get_next_value(self, dateval: datetime, field) -> Optional[int]:
+ def get_next_value(self, dateval: datetime, field) -> int | None:
# Figure out the weekday of the month's first day and the number of days in that month
first_day_wday, last_day = monthrange(dateval.year, dateval.month)
diff --git a/src/apscheduler/triggers/cron/fields.py b/src/apscheduler/triggers/cron/fields.py
index d15fcd5..e68fdca 100644
--- a/src/apscheduler/triggers/cron/fields.py
+++ b/src/apscheduler/triggers/cron/fields.py
@@ -4,7 +4,7 @@ from __future__ import annotations
import re
from calendar import monthrange
from datetime import datetime
-from typing import Any, ClassVar, List, Optional, Sequence
+from typing import Any, ClassVar, Sequence
from .expressions import (
WEEKDAYS, AllExpression, LastDayOfMonthExpression, MonthRangeExpression, RangeExpression,
@@ -32,7 +32,7 @@ class BaseField:
def __init__(self, name: str, exprs: int | str):
self.name = name
- self.expressions: List = []
+ self.expressions: list = []
for expr in SEPARATOR.split(str(exprs).strip()):
self.append_expression(expr)
@@ -45,7 +45,7 @@ class BaseField:
def get_value(self, dateval: datetime) -> int:
return getattr(dateval, self.name)
- def get_next_value(self, dateval: datetime) -> Optional[int]:
+ def get_next_value(self, dateval: datetime) -> int | None:
smallest = None
for expr in self.expressions:
value = expr.get_next_value(dateval, self)
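fields.py also swaps typing.List for the builtin (PEP 585). Under postponed evaluation, subscripted builtins in annotations work on 3.7+ as well, and the bare list used for self.expressions is an ordinary class that needs no import at all. For example:

    from __future__ import annotations

    def first_positive(values: list[int | None]) -> int | None:
        # Builtin generics replace typing.List/typing.Optional here;
        # both the union and list[...] stay unevaluated strings.
        for value in values:
            if value is not None and value > 0:
                return value
        return None

    print(first_positive([None, -3, 7]))  # 7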
diff --git a/src/apscheduler/triggers/date.py b/src/apscheduler/triggers/date.py
index 08494c3..173c972 100644
--- a/src/apscheduler/triggers/date.py
+++ b/src/apscheduler/triggers/date.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from datetime import datetime
-from typing import Any, Optional
+from typing import Any
import attrs
@@ -21,7 +21,7 @@ class DateTrigger(Trigger):
run_time: datetime = attrs.field(converter=as_aware_datetime)
_completed: bool = attrs.field(init=False, eq=False, default=False)
- def next(self) -> Optional[datetime]:
+ def next(self) -> datetime | None:
if not self._completed:
self._completed = True
return self.run_time
diff --git a/src/apscheduler/triggers/interval.py b/src/apscheduler/triggers/interval.py
index 0f85d36..629499b 100644
--- a/src/apscheduler/triggers/interval.py
+++ b/src/apscheduler/triggers/interval.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from datetime import datetime, timedelta
-from typing import Any, Optional
+from typing import Any
import attrs
@@ -37,9 +37,9 @@ class IntervalTrigger(Trigger):
seconds: float = 0
microseconds: float = 0
start_time: datetime = attrs.field(converter=as_aware_datetime, factory=datetime.now)
- end_time: Optional[datetime] = attrs.field(converter=as_aware_datetime, default=None)
+ end_time: datetime | None = attrs.field(converter=as_aware_datetime, default=None)
_interval: timedelta = attrs.field(init=False, eq=False, repr=False)
- _last_fire_time: Optional[datetime] = attrs.field(init=False, eq=False, default=None)
+ _last_fire_time: datetime | None = attrs.field(init=False, eq=False, default=None)
def __attrs_post_init__(self) -> None:
self._interval = timedelta(weeks=self.weeks, days=self.days, hours=self.hours,
@@ -52,7 +52,7 @@ class IntervalTrigger(Trigger):
if self.end_time and self.end_time < self.start_time:
raise ValueError('end_time cannot be earlier than start_time')
- def next(self) -> Optional[datetime]:
+ def next(self) -> datetime | None:
if self._last_fire_time is None:
self._last_fire_time = self.start_time
else:
diff --git a/src/apscheduler/validators.py b/src/apscheduler/validators.py
index 179eabd..baa7b50 100644
--- a/src/apscheduler/validators.py
+++ b/src/apscheduler/validators.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import sys
from datetime import date, datetime, timedelta, timezone, tzinfo
-from typing import Any, Optional
+from typing import Any
import attrs
from attrs import Attribute
@@ -17,7 +17,7 @@ else:
from backports.zoneinfo import ZoneInfo
-def as_int(value) -> Optional[int]:
+def as_int(value) -> int | None:
"""Convert the value into an integer."""
if value is None:
return None
@@ -49,7 +49,7 @@ def as_timezone(value: str | tzinfo | None) -> tzinfo:
f'{value.__class__.__qualname__} instead')
-def as_date(value: date | str | None) -> Optional[date]:
+def as_date(value: date | str | None) -> date | None:
"""
Convert the value to a date.
@@ -67,21 +67,21 @@ def as_date(value: date | str | None) -> Optional[date]:
raise TypeError(f'Expected string or date, got {value.__class__.__qualname__} instead')
-def as_timestamp(value: Optional[datetime]) -> Optional[float]:
+def as_timestamp(value: datetime | None) -> float | None:
if value is None:
return None
return value.timestamp()
-def as_ordinal_date(value: Optional[date]) -> Optional[int]:
+def as_ordinal_date(value: date | None) -> int | None:
if value is None:
return None
return value.toordinal()
-def as_aware_datetime(value: datetime | str | None) -> Optional[datetime]:
+def as_aware_datetime(value: datetime | str | None) -> datetime | None:
"""
Convert the value to a timezone aware datetime.
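These validators double as attrs converters, and their None-propagating shape is what lets optional fields stay optional after conversion. A hedged usage sketch, with a simplified stand-in for as_aware_datetime (the real one also attaches a timezone):

    from __future__ import annotations

    from datetime import datetime

    import attrs

    def as_datetime_or_none(value: datetime | str | None) -> datetime | None:
        # None passes through; ISO strings are parsed; datetimes return as-is.
        if value is None or isinstance(value, datetime):
            return value
        return datetime.fromisoformat(value)

    @attrs.define
    class Window:
        end_time: datetime | None = attrs.field(
            converter=as_datetime_or_none, default=None)

    print(Window('2022-01-02T15:25:38').end_time)  # 2022-01-02 15:25:38
    print(Window().end_time)                       # None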
diff --git a/src/apscheduler/workers/async_.py b/src/apscheduler/workers/async_.py
index 50f76a9..c581b0e 100644
--- a/src/apscheduler/workers/async_.py
+++ b/src/apscheduler/workers/async_.py
@@ -6,7 +6,7 @@ from contextlib import AsyncExitStack
from datetime import datetime, timezone
from inspect import isawaitable
from logging import Logger, getLogger
-from typing import Callable, Optional
+from typing import Callable
from uuid import UUID
import anyio
@@ -30,7 +30,7 @@ class AsyncWorker:
data_store: AsyncDataStore = attrs.field(converter=as_async_datastore)
max_concurrent_jobs: int = attrs.field(kw_only=True, validator=positive_integer, default=100)
identity: str = attrs.field(kw_only=True, default=None)
- logger: Optional[Logger] = attrs.field(kw_only=True, default=getLogger(__name__))
+ logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_wakeup_event: anyio.Event = attrs.field(init=False, factory=anyio.Event)
@@ -92,7 +92,7 @@ class AsyncWorker:
task_status.started()
await self._events.publish(WorkerStarted())
- exception: Optional[BaseException] = None
+ exception: BaseException | None = None
try:
async with create_task_group() as tg:
while self._state is RunState.started:
diff --git a/src/apscheduler/workers/sync.py b/src/apscheduler/workers/sync.py
index 69718b5..7a6bee9 100644
--- a/src/apscheduler/workers/sync.py
+++ b/src/apscheduler/workers/sync.py
@@ -8,7 +8,7 @@ from contextlib import ExitStack
from contextvars import copy_context
from datetime import datetime, timezone
from logging import Logger, getLogger
-from typing import Callable, Optional
+from typing import Callable
from uuid import UUID
import attrs
@@ -28,7 +28,7 @@ class Worker:
data_store: DataStore
max_concurrent_jobs: int = attrs.field(kw_only=True, validator=positive_integer, default=20)
identity: str = attrs.field(kw_only=True, default=None)
- logger: Optional[Logger] = attrs.field(kw_only=True, default=getLogger(__name__))
+ logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_wakeup_event: threading.Event = attrs.field(init=False)
@@ -98,7 +98,7 @@ class Worker:
self._events.publish(WorkerStarted())
executor = ThreadPoolExecutor(max_workers=self.max_concurrent_jobs)
- exception: Optional[BaseException] = None
+ exception: BaseException | None = None
try:
while self._state is RunState.started:
available_slots = self.max_concurrent_jobs - len(self._running_jobs)
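A detail that carries over unchanged in both worker hunks: the logger field's default, getLogger(__name__), is evaluated once at class-definition time, which suits a process-wide logger; a per-instance object would need factory= instead. An illustrative sketch (the logger name is made up):

    from __future__ import annotations

    from logging import Logger, getLogger

    import attrs

    @attrs.define
    class Worker:
        # default= evaluates once when the class body runs; all instances
        # share this logger unless a caller passes their own.
        logger: Logger | None = attrs.field(
            kw_only=True, default=getLogger('example.worker'))

    print(Worker().logger.name)  # example.worker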
diff --git a/tests/conftest.py b/tests/conftest.py
index 89c4510..37c363f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,5 +1,6 @@
+from __future__ import annotations
+
import sys
-from typing import Optional
import pytest
@@ -24,10 +25,10 @@ def timezone() -> ZoneInfo:
pytest.param(CBORSerializer, id='cbor'),
pytest.param(JSONSerializer, id='json')
])
-def serializer(request) -> Optional[Serializer]:
+def serializer(request) -> Serializer | None:
return request.param() if request.param else None
@pytest.fixture
-def anyio_backend() -> 'str':
+def anyio_backend() -> str:
return 'asyncio'
diff --git a/tests/test_datastores.py b/tests/test_datastores.py
index a73c5c2..3edff4a 100644
--- a/tests/test_datastores.py
+++ b/tests/test_datastores.py
@@ -3,7 +3,7 @@ from __future__ import annotations
from contextlib import asynccontextmanager
from datetime import datetime, timezone
from tempfile import TemporaryDirectory
-from typing import AsyncGenerator, Optional
+from typing import AsyncGenerator
import anyio
import pytest
@@ -145,7 +145,7 @@ def schedules() -> list[Schedule]:
@asynccontextmanager
async def capture_events(
datastore: AsyncDataStore, limit: int,
- event_types: Optional[set[type[Event]]] = None
+ event_types: set[type[Event]] | None = None
) -> AsyncGenerator[list[Event], None]:
def listener(event: Event) -> None:
events.append(event)
diff --git a/tests/test_eventbrokers.py b/tests/test_eventbrokers.py
index 024b63d..efc9382 100644
--- a/tests/test_eventbrokers.py
+++ b/tests/test_eventbrokers.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from concurrent.futures import Future
from datetime import datetime, timezone
from queue import Empty, Queue
diff --git a/tests/test_marshalling.py b/tests/test_marshalling.py
index 61fc783..c209388 100644
--- a/tests/test_marshalling.py
+++ b/tests/test_marshalling.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import sys
from datetime import timedelta
from functools import partial
@@ -36,7 +38,7 @@ class InheritedDummyClass(DummyClass):
pass
-class TestCallableToRef(object):
+class TestCallableToRef:
@pytest.mark.parametrize('obj, error', [
(partial(DummyClass.meth), 'Cannot create a reference to a partial()'),
(lambda: None, 'Cannot create a reference to a lambda')
@@ -66,7 +68,7 @@ class TestCallableToRef(object):
assert callable_to_ref(input) == expected
-class TestCallableFromRef(object):
+class TestCallableFromRef:
def test_valid_ref(self):
from logging.handlers import RotatingFileHandler
assert callable_from_ref('logging.handlers:RotatingFileHandler') is RotatingFileHandler
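The test classes also shed their explicit (object) base, a Python 2 leftover: since Python 3 every class is new-style, so the inheritance is identical either way:

    class Old(object):  # redundant base
        pass

    class New:          # same MRO: (New, object)
        pass

    assert Old.__mro__[1:] == New.__mro__[1:] == (object,)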
diff --git a/tests/test_schedulers.py b/tests/test_schedulers.py
index 2605535..7657cec 100644
--- a/tests/test_schedulers.py
+++ b/tests/test_schedulers.py
@@ -1,8 +1,9 @@
+from __future__ import annotations
+
import sys
import threading
import time
from datetime import datetime, timedelta, timezone
-from typing import Optional
from uuid import UUID
import anyio
@@ -114,7 +115,7 @@ class TestAsyncScheduler:
async with AsyncScheduler(start_worker=False) as scheduler:
trigger = IntervalTrigger(seconds=3, start_time=orig_start_time)
job_added_event = anyio.Event()
- job_id: Optional[UUID] = None
+ job_id: UUID | None = None
scheduler.events.subscribe(job_added_listener, {JobAdded})
schedule_id = await scheduler.add_schedule(dummy_async_job, trigger,
max_jitter=max_jitter)
@@ -260,7 +261,7 @@ class TestSyncScheduler:
with Scheduler(start_worker=False) as scheduler:
trigger = IntervalTrigger(seconds=3, start_time=orig_start_time)
job_added_event = threading.Event()
- job_id: Optional[UUID] = None
+ job_id: UUID | None = None
scheduler.events.subscribe(job_added_listener, {JobAdded})
schedule_id = scheduler.add_schedule(dummy_async_job, trigger, max_jitter=max_jitter)
schedule = scheduler.get_schedule(schedule_id)
diff --git a/tests/test_workers.py b/tests/test_workers.py
index f1f020e..dcfafb1 100644
--- a/tests/test_workers.py
+++ b/tests/test_workers.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import threading
from datetime import datetime, timezone
from typing import Callable
diff --git a/tox.ini b/tox.ini
index 956a193..26a882e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,19 +9,10 @@ commands = coverage run -m pytest {posargs}
extras = test
[testenv:lint]
-deps =
- isort
- autopep8
-commands =
- autopep8 -r -i src tests
- isort src tests
-skip_install = true
-
-[testenv:flake8]
-basepython = python3.7
-depends = lint
-deps = pyproject-flake8
-commands = pflake8 src tests
+depends =
+basepython = python3
+deps = pre-commit
+commands = pre-commit run --all-files --show-diff-on-failure
skip_install = true
[testenv:mypy]
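With the tox change above, tox -e lint becomes a thin wrapper around pre-commit, so the same hook set runs locally and on pre-commit.ci; invoking pre-commit run --all-files --show-diff-on-failure directly (with pre-commit installed) should be equivalent.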