diff --git a/.circleci/config.yml b/.circleci/config.yml
index 4eaf808..f29bd26 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -80,6 +80,7 @@ workflows:
             - "3.11"
             - "3.12"
             - "3.13"
+            - "3.14"
     - test_nooptionals:
         matrix:
           parameters:
diff --git a/.github/workflows/test-debian.yml b/.github/workflows/test-debian.yml
index 6e2cb60..0865175 100644
--- a/.github/workflows/test-debian.yml
+++ b/.github/workflows/test-debian.yml
@@ -10,7 +10,7 @@ jobs:
     name: "Build debian package"
     runs-on: ubuntu-latest
     container:
-      image: "debian:latest"
+      image: "debian:testing"
     steps:
       - name: Install dependencies
         run: |
diff --git a/debian/control b/debian/control
index 62bbdc6..ccf538a 100644
--- a/debian/control
+++ b/debian/control
@@ -8,6 +8,8 @@ Build-Depends: debhelper (>= 11),
 Build-Depends-Indep: dh-python,
  pybuild-plugin-pyproject,
  python3-all,
+ python3-aiohttp,
+ python3-asgiref,
  python3-decorator (>= 4.0.10),
  python3-pytest,
  python3-pytest-benchmark,
diff --git a/debian/patches/0002-Update-pyproject.toml.patch b/debian/patches/0002-Update-pyproject.toml.patch
index 3e68f5a..df7b84b 100644
--- a/debian/patches/0002-Update-pyproject.toml.patch
+++ b/debian/patches/0002-Update-pyproject.toml.patch
@@ -3,7 +3,7 @@ Index: python3-prometheus-client/pyproject.toml
 --- python3-prometheus-client.orig/pyproject.toml
 +++ python3-prometheus-client/pyproject.toml
 @@ -7,11 +7,7 @@ name = "prometheus_client"
- version = "0.23.1"
+ version = "0.24.1"
  description = "Python client for the Prometheus monitoring system."
  readme = "README.md"
 -license = "Apache-2.0 AND BSD-2-Clause"
@@ -15,7 +15,7 @@ Index: python3-prometheus-client/pyproject.toml
  requires-python = ">=3.9"
  authors = [
      { name = "The Prometheus Authors", email = "prometheus-developers@googlegroups.com" },
-@@ -50,3 +46,17 @@ Documentation = "https://prometheus.gith
+@@ -57,3 +53,17 @@ Documentation = "https://prometheus.gith
  [tool.setuptools.package-data]
  prometheus_client = ['py.typed']
diff --git a/docs/content/exporting/http/aiohttp.md b/docs/content/exporting/http/aiohttp.md
new file mode 100644
index 0000000..726b92c
--- /dev/null
+++ b/docs/content/exporting/http/aiohttp.md
@@ -0,0 +1,23 @@
+---
+title: AIOHTTP
+weight: 6
+---
+
+To use Prometheus with an [AIOHTTP server](https://docs.aiohttp.org/en/stable/web.html),
+there is `make_aiohttp_handler`, which creates a handler.
+
+```python
+from aiohttp import web
+from prometheus_client.aiohttp import make_aiohttp_handler
+
+app = web.Application()
+app.router.add_get("/metrics", make_aiohttp_handler())
+```
+
+By default, this handler will instruct AIOHTTP to automatically compress the
+response if requested by the client. This behaviour can be disabled by passing
+`disable_compression=True` when creating the handler, like this:
+
+```python
+app.router.add_get("/metrics", make_aiohttp_handler(disable_compression=True))
+```
diff --git a/docs/content/exporting/http/django.md b/docs/content/exporting/http/django.md
new file mode 100644
index 0000000..a900a3a
--- /dev/null
+++ b/docs/content/exporting/http/django.md
@@ -0,0 +1,47 @@
+---
+title: Django
+weight: 5
+---
+
+To use Prometheus with [Django](https://www.djangoproject.com/), you can use the provided view class
+to add a metrics endpoint to your app.
+
+```python
+# urls.py
+
+from django.urls import path
+from prometheus_client.django import PrometheusDjangoView
+
+urlpatterns = [
+    # ... any other urls that you want
+    path("metrics/", PrometheusDjangoView.as_view(), name="prometheus-metrics"),
+    # ... still more urls
+]
+```
+
+By default, multiprocess support is activated if the environment variable `PROMETHEUS_MULTIPROC_DIR` is set.
+You can override this through the view arguments:
+
+```python
+from django.conf import settings
+
+urlpatterns = [
+    path(
+        "metrics/",
+        PrometheusDjangoView.as_view(
+            multiprocess_mode=settings.YOUR_SETTING  # or any boolean value
+        ),
+        name="prometheus-metrics",
+    ),
+]
+```
+
+Full multiprocessing instructions are provided [here]({{< ref "/multiprocess" >}}).
+
+# django-prometheus
+
+The included `PrometheusDjangoView` is useful if you want to define your own metrics from scratch.
+
+An external package called [django-prometheus](https://github.com/django-commons/django-prometheus/)
+can be used instead if you want a set of ready-made monitoring metrics for your Django application,
+along with utilities such as model monitoring.
diff --git a/docs/content/exporting/pushgateway.md b/docs/content/exporting/pushgateway.md
index bf5eb11..d9f9a94 100644
--- a/docs/content/exporting/pushgateway.md
+++ b/docs/content/exporting/pushgateway.md
@@ -54,6 +54,32 @@ g.set_to_current_time()
 push_to_gateway('localhost:9091', job='batchA', registry=registry, handler=my_auth_handler)
 ```
+# Compressing data before sending to pushgateway
+Pushgateway supports gzip compression (version >= 1.5.0) and snappy compression (version > 1.6.0). This can help in network-constrained environments.
+To compress a push request, set the `compression` argument to `'gzip'` or `'snappy'`:
+```python
+push_to_gateway(
+    'localhost:9091',
+    job='batchA',
+    registry=registry,
+    handler=my_auth_handler,
+    compression='gzip',
+)
+```
+Snappy compression requires the optional [`python-snappy`](https://github.com/andrix/python-snappy) package.
+
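+For example, a minimal self-contained snappy push could look as follows (a sketch: it assumes
+`python-snappy` is installed and a Pushgateway listening on `localhost:9091`):
+```python
+from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
+
+registry = CollectorRegistry()
+g = Gauge('job_last_success_unixtime', 'Last time a batch job finished', registry=registry)
+g.set_to_current_time()
+# push_to_gateway raises RuntimeError when the python-snappy package is missing
+push_to_gateway('localhost:9091', job='batchA', registry=registry, compression='snappy')
+```
+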
 TLS Auth is also supported when using the push gateway with a special handler.
 
 ```python
diff --git a/mypy.ini b/mypy.ini
index fe372d0..3aa142c 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,5 +1,5 @@
 [mypy]
-exclude = prometheus_client/decorator.py|prometheus_client/twisted|tests/test_twisted.py
+exclude = prometheus_client/decorator.py|prometheus_client/twisted|tests/test_twisted.py|prometheus_client/django|tests/test_django.py
 implicit_reexport = False
 disallow_incomplete_defs = True
diff --git a/prometheus_client/aiohttp/__init__.py b/prometheus_client/aiohttp/__init__.py
new file mode 100644
index 0000000..9e5da15
--- /dev/null
+++ b/prometheus_client/aiohttp/__init__.py
@@ -0,0 +1,5 @@
+from .exposition import make_aiohttp_handler
+
+__all__ = [
+    "make_aiohttp_handler",
+]
diff --git a/prometheus_client/aiohttp/exposition.py b/prometheus_client/aiohttp/exposition.py
new file mode 100644
index 0000000..c1ae254
--- /dev/null
+++ b/prometheus_client/aiohttp/exposition.py
@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+from aiohttp import hdrs, web
+from aiohttp.typedefs import Handler
+
+from ..exposition import _bake_output
+from ..registry import Collector, REGISTRY
+
+
+def make_aiohttp_handler(
+    registry: Collector = REGISTRY,
+    disable_compression: bool = False,
+) -> Handler:
+    """Create an aiohttp handler which serves the metrics from a registry."""
+
+    async def prometheus_handler(request: web.Request) -> web.Response:
+        # Prepare parameters
+        params = {key: request.query.getall(key) for key in request.query.keys()}
+        accept_header = ",".join(request.headers.getall(hdrs.ACCEPT, []))
+        accept_encoding_header = ""
+        # Bake output
+        status, headers, output = _bake_output(
+            registry,
+            accept_header,
+            accept_encoding_header,
+            params,
+            # use AIOHTTP's compression
+            disable_compression=True,
+        )
+        response = web.Response(
+            status=int(status.split(" ")[0]),
+            headers=headers,
+            body=output,
+        )
+        if not disable_compression:
+            response.enable_compression()
+        return response
+
+    return prometheus_handler
diff --git a/prometheus_client/asgi.py b/prometheus_client/asgi.py
index affd984..6e527ca 100644
--- a/prometheus_client/asgi.py
+++ b/prometheus_client/asgi.py
@@ -2,10 +2,10 @@ from urllib.parse import parse_qs
 
 from .exposition import _bake_output
-from .registry import CollectorRegistry, REGISTRY
+from .registry import Collector, REGISTRY
 
 
-def make_asgi_app(registry: CollectorRegistry = REGISTRY, disable_compression: bool = False) -> Callable:
+def make_asgi_app(registry: Collector = REGISTRY, disable_compression: bool = False) -> Callable:
     """Create a ASGI app which serves the metrics from a registry."""
 
     async def prometheus_app(scope, receive, send):
diff --git a/prometheus_client/bridge/graphite.py b/prometheus_client/bridge/graphite.py
index 8cadbed..235324b 100755
--- a/prometheus_client/bridge/graphite.py
+++ b/prometheus_client/bridge/graphite.py
@@ -8,7 +8,7 @@ from timeit import default_timer
 from typing import Callable, Tuple
 
-from ..registry import CollectorRegistry, REGISTRY
+from ..registry import Collector, REGISTRY
 
 # Roughly, have to keep to what works as a file name.
 # We also remove periods, so labels can be distinguished.
@@ -48,7 +48,7 @@ def run(self):
 class GraphiteBridge:
     def __init__(self,
                  address: Tuple[str, int],
-                 registry: CollectorRegistry = REGISTRY,
+                 registry: Collector = REGISTRY,
                  timeout_seconds: float = 30,
                  _timer: Callable[[], float] = time.time,
                  tags: bool = False,
diff --git a/prometheus_client/django/__init__.py b/prometheus_client/django/__init__.py
new file mode 100644
index 0000000..280dbfb
--- /dev/null
+++ b/prometheus_client/django/__init__.py
@@ -0,0 +1,5 @@
+from .exposition import PrometheusDjangoView
+
+__all__ = [
+    "PrometheusDjangoView",
+]
diff --git a/prometheus_client/django/exposition.py b/prometheus_client/django/exposition.py
new file mode 100644
index 0000000..71fc8d8
--- /dev/null
+++ b/prometheus_client/django/exposition.py
@@ -0,0 +1,44 @@
+import os
+from typing import Optional
+
+from django.http import HttpResponse
+from django.views import View
+
+import prometheus_client
+from prometheus_client import multiprocess
+from prometheus_client.exposition import _bake_output
+
+
+class PrometheusDjangoView(View):
+    multiprocess_mode: bool = "PROMETHEUS_MULTIPROC_DIR" in os.environ or "prometheus_multiproc_dir" in os.environ
+    registry: Optional[prometheus_client.CollectorRegistry] = None
+
+    def get(self, request, *args, **kwargs):
+        if self.registry is None:
+            if self.multiprocess_mode:
+                self.registry = prometheus_client.CollectorRegistry()
+                multiprocess.MultiProcessCollector(self.registry)
+            else:
+                self.registry = prometheus_client.REGISTRY
+        accept_header = request.headers.get("Accept")
+        accept_encoding_header = request.headers.get("Accept-Encoding")
+        # Bake output
+        status, headers, output = _bake_output(
+            registry=self.registry,
+            accept_header=accept_header,
+            accept_encoding_header=accept_encoding_header,
+            params=request.GET,
+            disable_compression=False,
+        )
+        status = int(status.split(" ")[0])
+        return HttpResponse(
+            output,
+            status=status,
+            headers=headers,
+        )
+
+    def options(self, request, *args, **kwargs):
+        return HttpResponse(
+            status=200,
+            headers={"Allow": "OPTIONS,GET"},
+        )
diff --git a/prometheus_client/exposition.py b/prometheus_client/exposition.py
index 0d47170..ca06d91 100644
--- a/prometheus_client/exposition.py
+++ b/prometheus_client/exposition.py
@@ -9,7 +9,9 @@ import ssl
 import sys
 import threading
-from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+from typing import (
+    Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union,
+)
 from urllib.error import HTTPError
 from urllib.parse import parse_qs, quote_plus, urlparse
 from urllib.request import (
@@ -19,9 +21,16 @@ from wsgiref.simple_server import make_server, WSGIRequestHandler, WSGIServer
 
 from .openmetrics import exposition as openmetrics
-from .registry import CollectorRegistry, REGISTRY
+from .registry import Collector, REGISTRY
 from .utils import floatToGoString, parse_version
 
+try:
+    import snappy  # type: ignore
+    SNAPPY_AVAILABLE = True
+except ImportError:
+    snappy = None  # type: ignore
+    SNAPPY_AVAILABLE = False
+
 __all__ = (
     'CONTENT_TYPE_LATEST',
     'CONTENT_TYPE_PLAIN_0_0_4',
@@ -46,6 +55,7 @@
 """Content type of the latest format"""
 
 CONTENT_TYPE_LATEST = CONTENT_TYPE_PLAIN_1_0_0
+CompressionType = Optional[Literal['gzip', 'snappy']]
 
 
 class _PrometheusRedirectHandler(HTTPRedirectHandler):
@@ -118,7 +128,7 @@ def _bake_output(registry, accept_header, accept_encoding_header, params, disabl
     return '200 OK', headers, output
 
 
-def make_wsgi_app(registry: CollectorRegistry = REGISTRY, disable_compression: bool = False) -> Callable:
+def make_wsgi_app(registry: Collector
= REGISTRY, disable_compression: bool = False) -> Callable: """Create a WSGI app which serves the metrics from a registry.""" def prometheus_app(environ, start_response): @@ -223,7 +233,7 @@ def _get_ssl_ctx( def start_wsgi_server( port: int, addr: str = '0.0.0.0', - registry: CollectorRegistry = REGISTRY, + registry: Collector = REGISTRY, certfile: Optional[str] = None, keyfile: Optional[str] = None, client_cafile: Optional[str] = None, @@ -252,12 +262,12 @@ class TmpServer(ThreadingWSGIServer): start_http_server = start_wsgi_server -def generate_latest(registry: CollectorRegistry = REGISTRY, escaping: str = openmetrics.UNDERSCORES) -> bytes: +def generate_latest(registry: Collector = REGISTRY, escaping: str = openmetrics.UNDERSCORES) -> bytes: """ Generates the exposition format using the basic Prometheus text format. Params: - registry: CollectorRegistry to export data from. + registry: Collector to export data from. escaping: Escaping scheme used for metric and label names. Returns: UTF-8 encoded string containing the metrics in text format. @@ -330,7 +340,7 @@ def sample_line(samples): return ''.join(output).encode('utf-8') -def choose_encoder(accept_header: str) -> Tuple[Callable[[CollectorRegistry], bytes], str]: +def choose_encoder(accept_header: str) -> Tuple[Callable[[Collector], bytes], str]: # Python client library accepts a narrower range of content-types than # Prometheus does. accept_header = accept_header or '' @@ -408,7 +418,7 @@ def gzip_accepted(accept_encoding_header: str) -> bool: class MetricsHandler(BaseHTTPRequestHandler): """HTTP handler that gives metrics from ``REGISTRY``.""" - registry: CollectorRegistry = REGISTRY + registry: Collector = REGISTRY def do_GET(self) -> None: # Prepare parameters @@ -429,7 +439,7 @@ def log_message(self, format: str, *args: Any) -> None: """Log nothing.""" @classmethod - def factory(cls, registry: CollectorRegistry) -> type: + def factory(cls, registry: Collector) -> type: """Returns a dynamic MetricsHandler class tied to the passed registry. """ @@ -444,7 +454,7 @@ def factory(cls, registry: CollectorRegistry) -> type: return MyMetricsHandler -def write_to_textfile(path: str, registry: CollectorRegistry, escaping: str = openmetrics.ALLOWUTF8, tmpdir: Optional[str] = None) -> None: +def write_to_textfile(path: str, registry: Collector, escaping: str = openmetrics.ALLOWUTF8, tmpdir: Optional[str] = None) -> None: """Write metrics to the given path. This is intended for use with the Node exporter textfile collector. @@ -592,10 +602,11 @@ def tls_auth_handler( def push_to_gateway( gateway: str, job: str, - registry: CollectorRegistry, + registry: Collector, grouping_key: Optional[Dict[str, Any]] = None, timeout: Optional[float] = 30, handler: Callable = default_handler, + compression: CompressionType = None, ) -> None: """Push metrics to the given pushgateway. @@ -603,7 +614,7 @@ def push_to_gateway( 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics - `registry` is an instance of CollectorRegistry + `registry` is a Collector, normally an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. @@ -632,19 +643,22 @@ def push_to_gateway( failure. 'content' is the data which should be used to form the HTTP Message Body. + `compression` selects the payload compression. 
Supported values are 'gzip' + and 'snappy'. Defaults to None (no compression). This overwrites all metrics with the same job and grouping_key. This uses the PUT HTTP method.""" - _use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler) + _use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler, compression) def pushadd_to_gateway( gateway: str, job: str, - registry: Optional[CollectorRegistry], + registry: Optional[Collector], grouping_key: Optional[Dict[str, Any]] = None, timeout: Optional[float] = 30, handler: Callable = default_handler, + compression: CompressionType = None, ) -> None: """PushAdd metrics to the given pushgateway. @@ -652,7 +666,7 @@ def pushadd_to_gateway( 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics - `registry` is an instance of CollectorRegistry + `registry` is a Collector, normally an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. @@ -663,10 +677,12 @@ def pushadd_to_gateway( will be carried out by a default handler. See the 'prometheus_client.push_to_gateway' documentation for implementation requirements. + `compression` selects the payload compression. Supported values are 'gzip' + and 'snappy'. Defaults to None (no compression). This replaces metrics with the same name, job and grouping_key. This uses the POST HTTP method.""" - _use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler) + _use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler, compression) def delete_from_gateway( @@ -702,10 +718,11 @@ def _use_gateway( method: str, gateway: str, job: str, - registry: Optional[CollectorRegistry], + registry: Optional[Collector], grouping_key: Optional[Dict[str, Any]], timeout: Optional[float], handler: Callable, + compression: CompressionType = None, ) -> None: gateway_url = urlparse(gateway) # See https://bugs.python.org/issue27657 for details on urlparse in py>=3.7.6. 
@@ -715,24 +732,53 @@ def _use_gateway(
     gateway = gateway.rstrip('/')
     url = '{}/metrics/{}/{}'.format(gateway, *_escape_grouping_key("job", job))
 
-    data = b''
-    if method != 'DELETE':
-        if registry is None:
-            registry = REGISTRY
-        data = generate_latest(registry)
-
     if grouping_key is None:
         grouping_key = {}
     url += ''.join(
         '/{}/{}'.format(*_escape_grouping_key(str(k), str(v)))
         for k, v in sorted(grouping_key.items()))
 
+    data = b''
+    headers: List[Tuple[str, str]] = []
+    if method != 'DELETE':
+        if registry is None:
+            registry = REGISTRY
+        data = generate_latest(registry)
+        data, headers = _compress_payload(data, compression)
+    else:
+        # DELETE requests carry no payload; they still send the plain-text Content-Type header.
+        headers = [('Content-Type', CONTENT_TYPE_PLAIN_0_0_4)]
+        if compression is not None:
+            raise ValueError('Compression is not supported for DELETE requests.')
+
     handler(
         url=url, method=method, timeout=timeout,
-        headers=[('Content-Type', CONTENT_TYPE_PLAIN_0_0_4)], data=data,
+        headers=headers, data=data,
     )()
 
 
+def _compress_payload(data: bytes, compression: CompressionType) -> Tuple[bytes, List[Tuple[str, str]]]:
+    headers = [('Content-Type', CONTENT_TYPE_PLAIN_0_0_4)]
+    if compression is None:
+        return data, headers
+
+    encoding = compression.lower()
+    if encoding == 'gzip':
+        headers.append(('Content-Encoding', 'gzip'))
+        return gzip.compress(data), headers
+    if encoding == 'snappy':
+        if not SNAPPY_AVAILABLE:
+            raise RuntimeError('Snappy compression requires the python-snappy package to be installed.')
+        headers.append(('Content-Encoding', 'snappy'))
+        compressor = snappy.StreamCompressor()
+        compressed = compressor.compress(data)
+        flush = getattr(compressor, 'flush', None)
+        if callable(flush):
+            compressed += flush()
+        return compressed, headers
+    raise ValueError(f"Unsupported compression type: {compression}")
+
+
 def _escape_grouping_key(k, v):
     if v == "":
         # Per https://github.com/prometheus/pushgateway/pull/346.
diff --git a/prometheus_client/metrics.py b/prometheus_client/metrics.py index b9f25ff..4c53b26 100644 --- a/prometheus_client/metrics.py +++ b/prometheus_client/metrics.py @@ -109,6 +109,10 @@ def __init__(self: T, registry: Optional[CollectorRegistry] = REGISTRY, _labelvalues: Optional[Sequence[str]] = None, ) -> None: + + self._original_name = name + self._namespace = namespace + self._subsystem = subsystem self._name = _build_full_name(self._type, name, namespace, subsystem, unit) self._labelnames = _validate_labelnames(self, labelnames) self._labelvalues = tuple(_labelvalues or ()) @@ -176,13 +180,25 @@ def labels(self: T, *labelvalues: Any, **labelkwargs: Any) -> T: labelvalues = tuple(str(l) for l in labelvalues) with self._lock: if labelvalues not in self._metrics: + + original_name = getattr(self, '_original_name', self._name) + namespace = getattr(self, '_namespace', '') + subsystem = getattr(self, '_subsystem', '') + unit = getattr(self, '_unit', '') + + child_kwargs = dict(self._kwargs) if self._kwargs else {} + for k in ('namespace', 'subsystem', 'unit'): + child_kwargs.pop(k, None) + self._metrics[labelvalues] = self.__class__( - self._name, + original_name, documentation=self._documentation, labelnames=self._labelnames, - unit=self._unit, + namespace=namespace, + subsystem=subsystem, + unit=unit, _labelvalues=labelvalues, - **self._kwargs + **child_kwargs ) return self._metrics[labelvalues] @@ -203,6 +219,39 @@ def remove(self, *labelvalues: Any) -> None: if labelvalues in self._metrics: del self._metrics[labelvalues] + def remove_by_labels(self, labels: dict[str, str]) -> None: + """Remove all series whose labelset partially matches the given labels.""" + if 'prometheus_multiproc_dir' in os.environ or 'PROMETHEUS_MULTIPROC_DIR' in os.environ: + warnings.warn( + "Removal of labels has not been implemented in multi-process mode yet.", + UserWarning + ) + + if not self._labelnames: + raise ValueError('No label names were set when constructing %s' % self) + + if not isinstance(labels, dict): + raise TypeError("labels must be a dict of {label_name: label_value}") + + if not labels: + return # no operation + + invalid = [k for k in labels.keys() if k not in self._labelnames] + if invalid: + raise ValueError( + 'Unknown label names: %s; expected %s' % (invalid, self._labelnames) + ) + + pos_filter = {self._labelnames.index(k): str(v) for k, v in labels.items()} + + with self._lock: + # list(...) 
to avoid "dictionary changed size during iteration" + for lv in list(self._metrics.keys()): + if all(lv[pos] == want for pos, want in pos_filter.items()): + # pop with default avoids KeyError if concurrently removed + self._metrics.pop(lv, None) + + def clear(self) -> None: """Remove all labelsets from the metric""" if 'prometheus_multiproc_dir' in os.environ or 'PROMETHEUS_MULTIPROC_DIR' in os.environ: diff --git a/prometheus_client/multiprocess.py b/prometheus_client/multiprocess.py index 2682190..db55874 100644 --- a/prometheus_client/multiprocess.py +++ b/prometheus_client/multiprocess.py @@ -88,32 +88,42 @@ def _parse_key(key): @staticmethod def _accumulate_metrics(metrics, accumulate): for metric in metrics.values(): - samples = defaultdict(float) - sample_timestamps = defaultdict(float) + samples = defaultdict(lambda: defaultdict(float)) + sample_timestamps = defaultdict(lambda: defaultdict(float)) buckets = defaultdict(lambda: defaultdict(float)) - samples_setdefault = samples.setdefault for s in metric.samples: name, labels, value, timestamp, exemplar, native_histogram_value = s + + if ( + metric.type == 'gauge' + and metric._multiprocess_mode in ( + 'min', 'livemin', + 'max', 'livemax', + 'sum', 'livesum', + 'mostrecent', 'livemostrecent', + ) + ): + labels = tuple(l for l in labels if l[0] != 'pid') + if metric.type == 'gauge': - without_pid_key = (name, tuple(l for l in labels if l[0] != 'pid')) if metric._multiprocess_mode in ('min', 'livemin'): - current = samples_setdefault(without_pid_key, value) + current = samples[labels].setdefault((name, labels), value) if value < current: - samples[without_pid_key] = value + samples[labels][(name, labels)] = value elif metric._multiprocess_mode in ('max', 'livemax'): - current = samples_setdefault(without_pid_key, value) + current = samples[labels].setdefault((name, labels), value) if value > current: - samples[without_pid_key] = value + samples[labels][(name, labels)] = value elif metric._multiprocess_mode in ('sum', 'livesum'): - samples[without_pid_key] += value + samples[labels][(name, labels)] += value elif metric._multiprocess_mode in ('mostrecent', 'livemostrecent'): - current_timestamp = sample_timestamps[without_pid_key] + current_timestamp = sample_timestamps[labels][name] timestamp = float(timestamp or 0) if current_timestamp < timestamp: - samples[without_pid_key] = value - sample_timestamps[without_pid_key] = timestamp + samples[labels][(name, labels)] = value + sample_timestamps[labels][name] = timestamp else: # all/liveall - samples[(name, labels)] = value + samples[labels][(name, labels)] = value elif metric.type == 'histogram': # A for loop with early exit is faster than a genexpr @@ -127,10 +137,10 @@ def _accumulate_metrics(metrics, accumulate): break else: # did not find the `le` key # _sum/_count - samples[(name, labels)] += value + samples[labels][(name, labels)] += value else: # Counter and Summary. - samples[(name, labels)] += value + samples[labels][(name, labels)] += value # Accumulate bucket values. if metric.type == 'histogram': @@ -143,14 +153,17 @@ def _accumulate_metrics(metrics, accumulate): ) if accumulate: acc += value - samples[sample_key] = acc + samples[labels][sample_key] = acc else: - samples[sample_key] = value + samples[labels][sample_key] = value if accumulate: - samples[(metric.name + '_count', labels)] = acc + samples[labels][(metric.name + '_count', labels)] = acc # Convert to correct sample format. 
- metric.samples = [Sample(name_, dict(labels), value) for (name_, labels), value in samples.items()] + metric.samples = [] + for _, samples_by_labels in samples.items(): + for (name_, labels), value in samples_by_labels.items(): + metric.samples.append(Sample(name_, dict(labels), value)) return metrics.values() def collect(self): diff --git a/prometheus_client/registry.py b/prometheus_client/registry.py index 8de4ce9..9934117 100644 --- a/prometheus_client/registry.py +++ b/prometheus_client/registry.py @@ -1,24 +1,21 @@ -from abc import ABC, abstractmethod import copy from threading import Lock -from typing import Dict, Iterable, List, Optional +from typing import Dict, Iterable, List, Optional, Protocol from .metrics_core import Metric -# Ideally this would be a Protocol, but Protocols are only available in Python >= 3.8. -class Collector(ABC): - @abstractmethod +class Collector(Protocol): def collect(self) -> Iterable[Metric]: - pass + """Collect metrics.""" -class _EmptyCollector(Collector): +class _EmptyCollector: def collect(self) -> Iterable[Metric]: return [] -class CollectorRegistry(Collector): +class CollectorRegistry: """Metric collector registry. Collectors must have a no-argument method 'collect' that returns a list of diff --git a/pyproject.toml b/pyproject.toml index 8698859..ed3ef38 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "prometheus_client" -version = "0.23.1" +version = "0.24.1" description = "Python client for the Prometheus monitoring system." readme = "README.md" license = "Apache-2.0 AND BSD-2-Clause" @@ -34,6 +34,7 @@ classifiers = [ "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: System :: Monitoring", @@ -43,6 +44,12 @@ classifiers = [ twisted = [ "twisted", ] +aiohttp = [ + "aiohttp", +] +django = [ + "django", +] [project.urls] Homepage = "https://github.com/prometheus/client_python" diff --git a/tests/test_aiohttp.py b/tests/test_aiohttp.py new file mode 100644 index 0000000..e4fa368 --- /dev/null +++ b/tests/test_aiohttp.py @@ -0,0 +1,192 @@ +from __future__ import annotations + +import gzip +from typing import TYPE_CHECKING +from unittest import skipUnless + +from prometheus_client import CollectorRegistry, Counter +from prometheus_client.exposition import CONTENT_TYPE_PLAIN_0_0_4 + +try: + from aiohttp import ClientResponse, hdrs, web + from aiohttp.test_utils import AioHTTPTestCase + + from prometheus_client.aiohttp import make_aiohttp_handler + + AIOHTTP_INSTALLED = True +except ImportError: + if TYPE_CHECKING: + assert False + + from unittest import IsolatedAsyncioTestCase as AioHTTPTestCase + + AIOHTTP_INSTALLED = False + + +class AioHTTPTest(AioHTTPTestCase): + @skipUnless(AIOHTTP_INSTALLED, "AIOHTTP is not installed") + def setUp(self) -> None: + self.registry = CollectorRegistry() + + async def get_application(self) -> web.Application: + app = web.Application() + # The AioHTTPTestCase requires that applications be static, so we need + # both versions to be available so the test can choose between them + app.router.add_get("/metrics", make_aiohttp_handler(self.registry)) + app.router.add_get( + "/metrics_uncompressed", + make_aiohttp_handler(self.registry, disable_compression=True), + ) + return app + + def 
increment_metrics( + self, + metric_name: str, + help_text: str, + increments: int, + ) -> None: + c = Counter(metric_name, help_text, registry=self.registry) + for _ in range(increments): + c.inc() + + def assert_metrics( + self, + output: str, + metric_name: str, + help_text: str, + increments: int, + ) -> None: + self.assertIn("# HELP " + metric_name + "_total " + help_text + "\n", output) + self.assertIn("# TYPE " + metric_name + "_total counter\n", output) + self.assertIn(metric_name + "_total " + str(increments) + ".0\n", output) + + def assert_not_metrics( + self, + output: str, + metric_name: str, + help_text: str, + increments: int, + ) -> None: + self.assertNotIn("# HELP " + metric_name + "_total " + help_text + "\n", output) + self.assertNotIn("# TYPE " + metric_name + "_total counter\n", output) + self.assertNotIn(metric_name + "_total " + str(increments) + ".0\n", output) + + async def assert_outputs( + self, + response: ClientResponse, + metric_name: str, + help_text: str, + increments: int, + ) -> None: + self.assertIn( + CONTENT_TYPE_PLAIN_0_0_4, + response.headers.getall(hdrs.CONTENT_TYPE), + ) + output = await response.text() + self.assert_metrics(output, metric_name, help_text, increments) + + async def validate_metrics( + self, + metric_name: str, + help_text: str, + increments: int, + ) -> None: + """ + AIOHTTP handler serves the metrics from the provided registry. + """ + self.increment_metrics(metric_name, help_text, increments) + async with self.client.get("/metrics") as response: + response.raise_for_status() + await self.assert_outputs(response, metric_name, help_text, increments) + + async def test_report_metrics_1(self): + await self.validate_metrics("counter", "A counter", 2) + + async def test_report_metrics_2(self): + await self.validate_metrics("counter", "Another counter", 3) + + async def test_report_metrics_3(self): + await self.validate_metrics("requests", "Number of requests", 5) + + async def test_report_metrics_4(self): + await self.validate_metrics("failed_requests", "Number of failed requests", 7) + + async def test_gzip(self): + # Increment a metric. + metric_name = "counter" + help_text = "A counter" + increments = 2 + self.increment_metrics(metric_name, help_text, increments) + + async with self.client.get( + "/metrics", + auto_decompress=False, + headers={hdrs.ACCEPT_ENCODING: "gzip"}, + ) as response: + response.raise_for_status() + self.assertIn(hdrs.CONTENT_ENCODING, response.headers) + self.assertIn("gzip", response.headers.getall(hdrs.CONTENT_ENCODING)) + body = await response.read() + output = gzip.decompress(body).decode("utf8") + self.assert_metrics(output, metric_name, help_text, increments) + + async def test_gzip_disabled(self): + # Increment a metric. 
+ metric_name = "counter" + help_text = "A counter" + increments = 2 + self.increment_metrics(metric_name, help_text, increments) + + async with self.client.get( + "/metrics_uncompressed", + auto_decompress=False, + headers={hdrs.ACCEPT_ENCODING: "gzip"}, + ) as response: + response.raise_for_status() + self.assertNotIn(hdrs.CONTENT_ENCODING, response.headers) + output = await response.text() + self.assert_metrics(output, metric_name, help_text, increments) + + async def test_openmetrics_encoding(self): + """Response content type is application/openmetrics-text when appropriate Accept header is in request""" + async with self.client.get( + "/metrics", + auto_decompress=False, + headers={hdrs.ACCEPT: "application/openmetrics-text; version=1.0.0"}, + ) as response: + response.raise_for_status() + self.assertEqual( + response.headers.getone(hdrs.CONTENT_TYPE).split(";", maxsplit=1)[0], + "application/openmetrics-text", + ) + + async def test_plaintext_encoding(self): + """Response content type is text/plain when Accept header is missing in request""" + async with self.client.get("/metrics") as response: + response.raise_for_status() + self.assertEqual( + response.headers.getone(hdrs.CONTENT_TYPE).split(";", maxsplit=1)[0], + "text/plain", + ) + + async def test_qs_parsing(self): + """Only metrics that match the 'name[]' query string param appear""" + + metrics = [("asdf", "first test metric", 1), ("bsdf", "second test metric", 2)] + + for m in metrics: + self.increment_metrics(*m) + + for i_1 in range(len(metrics)): + async with self.client.get( + "/metrics", + params={"name[]": f"{metrics[i_1][0]}_total"}, + ) as response: + output = await response.text() + self.assert_metrics(output, *metrics[i_1]) + + for i_2 in range(len(metrics)): + if i_1 == i_2: + continue + + self.assert_not_metrics(output, *metrics[i_2]) diff --git a/tests/test_asgi.py b/tests/test_asgi.py index 86431d2..d4933ce 100644 --- a/tests/test_asgi.py +++ b/tests/test_asgi.py @@ -1,19 +1,11 @@ +import asyncio import gzip -from unittest import skipUnless, TestCase +from unittest import TestCase -from prometheus_client import CollectorRegistry, Counter -from prometheus_client.exposition import CONTENT_TYPE_PLAIN_0_0_4 - -try: - # Python >3.5 only - import asyncio +from asgiref.testing import ApplicationCommunicator - from asgiref.testing import ApplicationCommunicator - - from prometheus_client import make_asgi_app - HAVE_ASYNCIO_AND_ASGI = True -except ImportError: - HAVE_ASYNCIO_AND_ASGI = False +from prometheus_client import CollectorRegistry, Counter, make_asgi_app +from prometheus_client.exposition import CONTENT_TYPE_PLAIN_0_0_4 def setup_testing_defaults(scope): @@ -33,7 +25,6 @@ def setup_testing_defaults(scope): class ASGITest(TestCase): - @skipUnless(HAVE_ASYNCIO_AND_ASGI, "Don't have asyncio/asgi installed.") def setUp(self): self.registry = CollectorRegistry() self.captured_status = None diff --git a/tests/test_core.py b/tests/test_core.py index 284bce0..c7c9c14 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -630,6 +630,40 @@ def test_labels_coerced_to_string(self): self.counter.remove(None) self.assertEqual(None, self.registry.get_sample_value('c_total', {'l': 'None'})) + def test_remove_by_labels(self): + from prometheus_client import Counter + + c = Counter('c2', 'help', ['tenant', 'endpoint'], registry=self.registry) + c.labels('acme', '/').inc() + c.labels('acme', '/checkout').inc() + c.labels('globex', '/').inc() + + ret = c.remove_by_labels({'tenant': 'acme'}) + self.assertIsNone(ret) + + 
self.assertIsNone(self.registry.get_sample_value('c2_total', {'tenant': 'acme', 'endpoint': '/'})) + self.assertIsNone(self.registry.get_sample_value('c2_total', {'tenant': 'acme', 'endpoint': '/checkout'})) + self.assertEqual(1, self.registry.get_sample_value('c2_total', {'tenant': 'globex', 'endpoint': '/'})) + + + def test_remove_by_labels_invalid_label_name(self): + from prometheus_client import Counter + c = Counter('c3', 'help', ['tenant', 'endpoint'], registry=self.registry) + c.labels('acme', '/').inc() + with self.assertRaises(ValueError): + c.remove_by_labels({'badkey': 'x'}) + + + def test_remove_by_labels_empty_is_noop(self): + from prometheus_client import Counter + c = Counter('c4', 'help', ['tenant', 'endpoint'], registry=self.registry) + c.labels('acme', '/').inc() + + ret = c.remove_by_labels({}) + self.assertIsNone(ret) + # Ensure the series is still present + self.assertEqual(1, self.registry.get_sample_value('c4_total', {'tenant': 'acme', 'endpoint': '/'})) + def test_non_string_labels_raises(self): class Test: __str__ = None diff --git a/tests/test_django.py b/tests/test_django.py new file mode 100644 index 0000000..659bb3f --- /dev/null +++ b/tests/test_django.py @@ -0,0 +1,48 @@ +from unittest import skipUnless + +from prometheus_client import CollectorRegistry, Counter, generate_latest +from prometheus_client.openmetrics.exposition import ALLOWUTF8 + +try: + import django + from django.test import RequestFactory, TestCase + + from prometheus_client.django import PrometheusDjangoView + + HAVE_DJANGO = True +except ImportError: + from unittest import TestCase + + HAVE_DJANGO = False + +else: + from django.conf import settings + + if not settings.configured: + settings.configure( + DATABASES={ + "default": { + "ENGINE": "django.db.backends.sqlite3", + 'NAME': ':memory:' + } + }, + INSTALLED_APPS=[], + ) + django.setup() + + +class MetricsResourceTest(TestCase): + @skipUnless(HAVE_DJANGO, "Don't have django installed.") + def setUp(self): + self.registry = CollectorRegistry() + self.factory = RequestFactory() + + def test_reports_metrics(self): + c = Counter('cc', 'A counter', registry=self.registry) + c.inc() + + request = self.factory.get("/metrics") + + response = PrometheusDjangoView.as_view(registry=self.registry)(request) + self.assertEqual(response.status_code, 200) + self.assertEqual(response.content, generate_latest(self.registry, ALLOWUTF8)) diff --git a/tests/test_exposition.py b/tests/test_exposition.py index 3dd5e37..aceff73 100644 --- a/tests/test_exposition.py +++ b/tests/test_exposition.py @@ -1,3 +1,4 @@ +import gzip from http.server import BaseHTTPRequestHandler, HTTPServer import os import threading @@ -404,6 +405,30 @@ def test_push_with_trailing_slash(self): self.assertNotIn('//', self.requests[0][0].path) + def test_push_with_gzip_compression(self): + push_to_gateway(self.address, "my_job", self.registry, compression='gzip') + request, body = self.requests[0] + self.assertEqual(request.headers.get('content-encoding'), 'gzip') + decompressed = gzip.decompress(body) + self.assertEqual(decompressed, b'# HELP g help\n# TYPE g gauge\ng 0.0\n') + + def test_push_with_snappy_compression(self): + snappy = pytest.importorskip('snappy') + push_to_gateway(self.address, "my_job", self.registry, compression='snappy') + request, body = self.requests[0] + self.assertEqual(request.headers.get('content-encoding'), 'snappy') + decompressor = snappy.StreamDecompressor() + decompressed = decompressor.decompress(body) + flush = getattr(decompressor, 'flush', None) + if 
callable(flush): + decompressed += flush() + self.assertEqual(decompressed, b'# HELP g help\n# TYPE g gauge\ng 0.0\n') + + def test_push_with_invalid_compression(self): + with self.assertRaisesRegex(ValueError, 'Unsupported compression type'): + push_to_gateway(self.address, "my_job", self.registry, compression='brotli') + self.assertEqual(self.requests, []) + def test_instance_ip_grouping_key(self): self.assertTrue('' != instance_ip_grouping_key()['instance']) diff --git a/tests/test_multiprocess.py b/tests/test_multiprocess.py index 77fd3d8..c2f71d2 100644 --- a/tests/test_multiprocess.py +++ b/tests/test_multiprocess.py @@ -276,10 +276,8 @@ def add_label(key, value): Sample('g', add_label('pid', '1'), 1.0), ]) - metrics['h'].samples.sort( - key=lambda x: (x[0], float(x[1].get('le', 0))) - ) expected_histogram = [ + Sample('h_sum', labels, 6.0), Sample('h_bucket', add_label('le', '0.005'), 0.0), Sample('h_bucket', add_label('le', '0.01'), 0.0), Sample('h_bucket', add_label('le', '0.025'), 0.0), @@ -296,7 +294,66 @@ def add_label(key, value): Sample('h_bucket', add_label('le', '10.0'), 2.0), Sample('h_bucket', add_label('le', '+Inf'), 2.0), Sample('h_count', labels, 2.0), - Sample('h_sum', labels, 6.0), + ] + + self.assertEqual(metrics['h'].samples, expected_histogram) + + def test_collect_histogram_ordering(self): + pid = 0 + values.ValueClass = MultiProcessValue(lambda: pid) + labels = {i: i for i in 'abcd'} + + def add_label(key, value): + l = labels.copy() + l[key] = value + return l + + h = Histogram('h', 'help', labelnames=['view'], registry=None) + + h.labels(view='view1').observe(1) + + pid = 1 + + h.labels(view='view1').observe(5) + h.labels(view='view2').observe(1) + + metrics = {m.name: m for m in self.collector.collect()} + + expected_histogram = [ + Sample('h_sum', {'view': 'view1'}, 6.0), + Sample('h_bucket', {'view': 'view1', 'le': '0.005'}, 0.0), + Sample('h_bucket', {'view': 'view1', 'le': '0.01'}, 0.0), + Sample('h_bucket', {'view': 'view1', 'le': '0.025'}, 0.0), + Sample('h_bucket', {'view': 'view1', 'le': '0.05'}, 0.0), + Sample('h_bucket', {'view': 'view1', 'le': '0.075'}, 0.0), + Sample('h_bucket', {'view': 'view1', 'le': '0.1'}, 0.0), + Sample('h_bucket', {'view': 'view1', 'le': '0.25'}, 0.0), + Sample('h_bucket', {'view': 'view1', 'le': '0.5'}, 0.0), + Sample('h_bucket', {'view': 'view1', 'le': '0.75'}, 0.0), + Sample('h_bucket', {'view': 'view1', 'le': '1.0'}, 1.0), + Sample('h_bucket', {'view': 'view1', 'le': '2.5'}, 1.0), + Sample('h_bucket', {'view': 'view1', 'le': '5.0'}, 2.0), + Sample('h_bucket', {'view': 'view1', 'le': '7.5'}, 2.0), + Sample('h_bucket', {'view': 'view1', 'le': '10.0'}, 2.0), + Sample('h_bucket', {'view': 'view1', 'le': '+Inf'}, 2.0), + Sample('h_count', {'view': 'view1'}, 2.0), + Sample('h_sum', {'view': 'view2'}, 1.0), + Sample('h_bucket', {'view': 'view2', 'le': '0.005'}, 0.0), + Sample('h_bucket', {'view': 'view2', 'le': '0.01'}, 0.0), + Sample('h_bucket', {'view': 'view2', 'le': '0.025'}, 0.0), + Sample('h_bucket', {'view': 'view2', 'le': '0.05'}, 0.0), + Sample('h_bucket', {'view': 'view2', 'le': '0.075'}, 0.0), + Sample('h_bucket', {'view': 'view2', 'le': '0.1'}, 0.0), + Sample('h_bucket', {'view': 'view2', 'le': '0.25'}, 0.0), + Sample('h_bucket', {'view': 'view2', 'le': '0.5'}, 0.0), + Sample('h_bucket', {'view': 'view2', 'le': '0.75'}, 0.0), + Sample('h_bucket', {'view': 'view2', 'le': '1.0'}, 1.0), + Sample('h_bucket', {'view': 'view2', 'le': '2.5'}, 1.0), + Sample('h_bucket', {'view': 'view2', 'le': '5.0'}, 1.0), + 
Sample('h_bucket', {'view': 'view2', 'le': '7.5'}, 1.0), + Sample('h_bucket', {'view': 'view2', 'le': '10.0'}, 1.0), + Sample('h_bucket', {'view': 'view2', 'le': '+Inf'}, 1.0), + Sample('h_count', {'view': 'view2'}, 1.0), ] self.assertEqual(metrics['h'].samples, expected_histogram) @@ -347,10 +404,8 @@ def add_label(key, value): m.name: m for m in self.collector.merge(files, accumulate=False) } - metrics['h'].samples.sort( - key=lambda x: (x[0], float(x[1].get('le', 0))) - ) expected_histogram = [ + Sample('h_sum', labels, 6.0), Sample('h_bucket', add_label('le', '0.005'), 0.0), Sample('h_bucket', add_label('le', '0.01'), 0.0), Sample('h_bucket', add_label('le', '0.025'), 0.0), @@ -366,7 +421,6 @@ def add_label(key, value): Sample('h_bucket', add_label('le', '7.5'), 0.0), Sample('h_bucket', add_label('le', '10.0'), 0.0), Sample('h_bucket', add_label('le', '+Inf'), 0.0), - Sample('h_sum', labels, 6.0), ] self.assertEqual(metrics['h'].samples, expected_histogram) @@ -396,6 +450,116 @@ def test_remove_clear_warning(self): assert "Removal of labels has not been implemented" in str(w[0].message) assert issubclass(w[-1].category, UserWarning) assert "Clearing labels has not been implemented" in str(w[-1].message) + + def test_child_name_is_built_once_with_namespace_subsystem_unit(self): + """ + Repro for #1035: + In multiprocess mode, child metrics must NOT rebuild the full name + (namespace/subsystem/unit) a second time. The exported family name should + be built once, and Counter samples should use "_total". + """ + from prometheus_client import Counter + + class CustomCounter(Counter): + def __init__( + self, + name, + documentation, + labelnames=(), + namespace="mydefaultnamespace", + subsystem="mydefaultsubsystem", + unit="", + registry=None, + _labelvalues=None + ): + # Intentionally provide non-empty defaults to trigger the bug path. 
+ super().__init__( + name=name, + documentation=documentation, + labelnames=labelnames, + namespace=namespace, + subsystem=subsystem, + unit=unit, + registry=registry, + _labelvalues=_labelvalues) + + # Create a Counter with explicit namespace/subsystem/unit + c = CustomCounter( + name='m', + documentation='help', + labelnames=('status', 'method'), + namespace='ns', + subsystem='ss', + unit='seconds', # avoid '_total_total' confusion + registry=None, # not registered in local registry in multiprocess mode + ) + + # Create two labeled children + c.labels(status='200', method='GET').inc() + c.labels(status='404', method='POST').inc() + + # Collect from the multiprocess collector initialized in setUp() + metrics = {m.name: m for m in self.collector.collect()} + + # Family name should be built once (no '_total' in family name) + expected_family = 'ns_ss_m_seconds' + self.assertIn(expected_family, metrics, f"missing family {expected_family}") + + # Counter samples must use '_total' + mf = metrics[expected_family] + sample_names = {s.name for s in mf.samples} + self.assertTrue( + all(name == expected_family + '_total' for name in sample_names), + f"unexpected sample names: {sample_names}" + ) + + # Ensure no double-built prefix sneaks in (the original bug) + bad_prefix = 'mydefaultnamespace_mydefaultsubsystem_' + all_names = {mf.name, *sample_names} + self.assertTrue( + all(not n.startswith(bad_prefix) for n in all_names), + f"found double-built name(s): {[n for n in all_names if n.startswith(bad_prefix)]}" + ) + + def test_child_preserves_parent_context_for_subclasses(self): + """ + Ensure child metrics preserve parent's namespace/subsystem/unit information + so that subclasses can correctly use these parameters in their logic. + """ + class ContextAwareCounter(Counter): + def __init__(self, + name, + documentation, + labelnames=(), + namespace="", + subsystem="", + unit="", + **kwargs): + self.context = { + 'namespace': namespace, + 'subsystem': subsystem, + 'unit': unit + } + super().__init__(name, documentation, + labelnames=labelnames, + namespace=namespace, + subsystem=subsystem, + unit=unit, + **kwargs) + + parent = ContextAwareCounter('m', 'help', + labelnames=['status'], + namespace='prod', + subsystem='api', + unit='seconds', + registry=None) + + child = parent.labels(status='200') + + # Verify that child retains parent's context + self.assertEqual(child.context['namespace'], 'prod') + self.assertEqual(child.context['subsystem'], 'api') + self.assertEqual(child.context['unit'], 'seconds') class TestMmapedDict(unittest.TestCase): diff --git a/tox.ini b/tox.ini index 4033702..992bd0a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,13 +1,17 @@ [tox] -envlist = coverage-clean,py{3.9,3.10,3.11,3.12,3.13,py3.9,3.9-nooptionals},coverage-report,flake8,isort,mypy +envlist = coverage-clean,py{3.9,3.10,3.11,3.12,3.13,3.14,py3.9,3.9-nooptionals},coverage-report,flake8,isort,mypy [testenv] deps = + asgiref coverage pytest pytest-benchmark attrs {py3.9,pypy3.9}: twisted + {py3.9,pypy3.9}: aiohttp + {py3.9,pypy3.9}: django + {py3.9}: python-snappy commands = coverage run --parallel -m pytest {posargs} [testenv:py3.9-nooptionals] @@ -44,6 +48,7 @@ commands = [testenv:mypy] deps = pytest + aiohttp asgiref mypy==0.991 skip_install = true
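As a usage note for the `remove_by_labels` API introduced in `prometheus_client/metrics.py` above, here is a minimal sketch mirroring the scenario in `tests/test_core.py` (the metric and label names are illustrative):

```python
from prometheus_client import CollectorRegistry, Counter

registry = CollectorRegistry()
c = Counter('requests', 'Requests served', ['tenant', 'endpoint'], registry=registry)
c.labels('acme', '/').inc()
c.labels('acme', '/checkout').inc()
c.labels('globex', '/').inc()

# Removes every series whose labelset matches tenant='acme';
# the ('globex', '/') series is left untouched.
c.remove_by_labels({'tenant': 'acme'})
```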