diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000..9fbe823 --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,136 @@ +name: Publish to PyPI + +on: + release: + types: [published] + +jobs: + publish: + runs-on: ubuntu-latest + permissions: + contents: write + id-token: write + pull-requests: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + version: latest + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Cache Poetry dependencies + uses: actions/cache@v3 + with: + path: | + ~/.cache/pypoetry + .venv + key: poetry-${{ runner.os }}-${{ hashFiles('**/poetry.lock') }} + restore-keys: | + poetry-${{ runner.os }}- + + - name: Install dependencies + run: poetry install --no-interaction --no-ansi + + - name: Check version consistency + id: version_check + run: | + # Extract version from pyproject.toml + PYPROJECT_VERSION=$(poetry version -s) + echo "pyproject_version=$PYPROJECT_VERSION" >> $GITHUB_OUTPUT + + # Extract version from release tag (remove 'v' prefix if present) + RELEASE_TAG="${{ github.event.release.tag_name }}" + TAG_VERSION="${RELEASE_TAG#v}" + echo "tag_version=$TAG_VERSION" >> $GITHUB_OUTPUT + + echo "PyProject version: $PYPROJECT_VERSION" + echo "Release tag version: $TAG_VERSION" + + if [ "$PYPROJECT_VERSION" != "$TAG_VERSION" ]; then + echo "version_mismatch=true" >> $GITHUB_OUTPUT + echo "❌ Version mismatch detected!" + echo "PyProject version: $PYPROJECT_VERSION" + echo "Release tag version: $TAG_VERSION" + else + echo "version_mismatch=false" >> $GITHUB_OUTPUT + echo "✅ Version consistency check passed!" 
+ fi + + - name: Create version update PR + if: steps.version_check.outputs.version_mismatch == 'true' + run: | + # Configure git + git config --global user.name 'github-actions[bot]' + git config --global user.email 'github-actions[bot]@users.noreply.github.com' + + # Create new branch + BRANCH_NAME="update-version-to-${{ steps.version_check.outputs.tag_version }}" + git checkout -b "$BRANCH_NAME" + + # Update version in pyproject.toml + poetry version "${{ steps.version_check.outputs.tag_version }}" + + # Commit changes + git add pyproject.toml + git commit -m "Update version to ${{ steps.version_check.outputs.tag_version }} to match release tag + + This automated commit updates the version in pyproject.toml to match the release tag ${{ github.event.release.tag_name }}. + + Previous version: ${{ steps.version_check.outputs.pyproject_version }} + New version: ${{ steps.version_check.outputs.tag_version }}" + + # Push branch + git push origin "$BRANCH_NAME" + + # Create PR + gh pr create \ + --title "🔖 Update version to ${{ steps.version_check.outputs.tag_version }}" \ + --body "## Version Update + + This PR automatically updates the version in \`pyproject.toml\` to match the release tag. + + **Changes:** + - Update version from \`${{ steps.version_check.outputs.pyproject_version }}\` to \`${{ steps.version_check.outputs.tag_version }}\` + + **Triggered by:** Release [${{ github.event.release.tag_name }}](${{ github.event.release.html_url }}) + + **Note:** The PyPI publishing workflow has been paused until this version mismatch is resolved. Once this PR is merged, please re-run the publishing workflow or create a new release." \ + --head "$BRANCH_NAME" \ + --base main \ + --label "automated" \ + --label "version-update" + env: + GH_TOKEN: ${{ github.token }} + + - name: Stop workflow if version mismatch + if: steps.version_check.outputs.version_mismatch == 'true' + run: | + echo "❌ Stopping workflow due to version mismatch." 
+ echo "A PR has been created to fix the version inconsistency." + echo "Please merge the PR and re-run this workflow or create a new release." + exit 1 + + - name: Run tests + run: poetry run pytest tests/ --cov=avidtools + + - name: Run linting + run: poetry run ruff check avidtools + + - name: Run type checking + run: timeout 60 poetry run mypy --config-file ./mypy.ini avidtools || echo "Type checking completed with timeout" + + - name: Build package + run: poetry build + + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..5dbe73f --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,48 @@ +name: Run Tests + +on: + pull_request: + branches: + - main + +jobs: + IntegrationTests: + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.12' + + - name: Install Poetry + run: | + curl -sSL https://install.python-poetry.org | python3 - + + # Cache poetry dependencies so that tests are faster if the lock file hasn't changed + - name: Cache Poetry dependencies + uses: actions/cache@v3 + with: + path: | + ~/.cache/pypoetry + ~/.virtualenvs + key: poetry-${{ runner.os }}-${{ hashFiles('**/poetry.lock') }} + restore-keys: | + poetry-${{ runner.os }}- + + - name: Install dependencies with Poetry + run: poetry install --no-interaction --no-ansi + + - name: Mypy Type Checking + run: make typecheck + + - name: Ruff Linting and Formatting + run: make linting + + - name: Run tests + run: make test \ No newline at end of file diff --git a/.github/workflows/update-version.yml b/.github/workflows/update-version.yml new file mode 100644 index 0000000..f11199a --- /dev/null +++ b/.github/workflows/update-version.yml @@ -0,0 +1,144 @@ +name: Update Version + +on: + workflow_dispatch: + 
inputs: + version: + description: 'New version (e.g., 0.2.1)' + required: true + type: string + update_method: + description: 'How to update the version' + required: true + type: choice + options: + - 'create-pr' + - 'direct-commit-and-release' + default: 'create-pr' + +jobs: + update-version: + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + version: latest + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Validate version format + run: | + VERSION="${{ github.event.inputs.version }}" + if ! echo "$VERSION" | grep -E '^[0-9]+\.[0-9]+\.[0-9]+$'; then + echo "❌ Invalid version format: $VERSION" + echo "Version must follow semantic versioning (e.g., 1.2.3)" + exit 1 + fi + echo "✅ Version format is valid: $VERSION" + + - name: Configure git + run: | + git config --global user.name 'github-actions[bot]' + git config --global user.email 'github-actions[bot]@users.noreply.github.com' + + - name: Update version via PR + if: github.event.inputs.update_method == 'create-pr' + run: | + CURRENT_VERSION=$(poetry version -s) + NEW_VERSION="${{ github.event.inputs.version }}" + + echo "Current version: $CURRENT_VERSION" + echo "New version: $NEW_VERSION" + + # Update version + poetry version "$NEW_VERSION" + + # Create branch + BRANCH_NAME="update-version-to-$NEW_VERSION" + git checkout -b "$BRANCH_NAME" + + # Commit changes + git add pyproject.toml + git commit -m "🔖 Update version to $NEW_VERSION + + Manual version update from $CURRENT_VERSION to $NEW_VERSION" + + # Push branch + git push origin "$BRANCH_NAME" + + # Create PR + gh pr create \ + --title "🔖 Update version to $NEW_VERSION" \ + --body "## Version Update + + This PR updates the version in \`pyproject.toml\`. 
+ + **Changes:** + - Update version from \`$CURRENT_VERSION\` to \`$NEW_VERSION\` + + **Triggered by:** Manual workflow dispatch (PR method) + + **Next steps:** Merge this PR manually when ready." \ + --head "$BRANCH_NAME" \ + --base main \ + --label "version-update" + + echo "✅ PR created! Please review and merge manually." + env: + GH_TOKEN: ${{ github.token }} + + - name: Update version and create release directly + if: github.event.inputs.update_method == 'direct-commit-and-release' + run: | + CURRENT_VERSION=$(poetry version -s) + NEW_VERSION="${{ github.event.inputs.version }}" + + echo "Current version: $CURRENT_VERSION" + echo "New version: $NEW_VERSION" + + # Update version + poetry version "$NEW_VERSION" + + # Commit directly to main + git add pyproject.toml + git commit -m "🔖 Update version to $NEW_VERSION + + Automatic version update from $CURRENT_VERSION to $NEW_VERSION + + This commit was created automatically by the version update workflow + and will trigger a GitHub release and PyPI publishing." + + # Push to main + git push origin main + + # Create GitHub release + echo "🚀 Creating GitHub release for version $NEW_VERSION..." + + gh release create "v$NEW_VERSION" \ + --title "Release v$NEW_VERSION" \ + --notes "## What's Changed + + - Updated version to $NEW_VERSION + + **Automatically generated release from version update workflow.** + + This release was created automatically and will trigger PyPI publishing." \ + --latest + + echo "✅ Version updated and release v$NEW_VERSION created successfully!" + echo "🔄 This will trigger the PyPI publishing workflow automatically." 
+ env: + GH_TOKEN: ${{ github.token }} diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..ef2720f --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,11 @@ +include README.md +include LICENSE +include MANIFEST.in +recursive-include avidtools *.py +recursive-exclude tests * +recursive-exclude .github * +exclude .gitignore +exclude .ruff_cache +exclude .pytest_cache +exclude .mypy_cache +exclude *.egg-info diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..fd1f756 --- /dev/null +++ b/Makefile @@ -0,0 +1,68 @@ +.PHONY: all setup setup-submodules update-submodules install update clean test lint format help + +# Default target +all: help + +# Setup the project +setup: install + +# Install dependencies +install: + @echo "Installing dependencies..." + poetry install + @echo "Installation complete." + +# Clean up generated files +clean: + @echo "Cleaning up..." + find . -type d -name "__pycache__" -exec rm -rf {} + + find . -type f -name "*.pyc" -delete + find . -type f -name "*.pyo" -delete + find . -type f -name "*.pyd" -delete + find . -type f -name "*.spec" -delete + rm -rf .pytest_cache + rm -rf .mypy_cache + rm -rf build + rm -rf dist + rm -rf *.egg-info + @echo "Cleanup complete." + +# Run tests +test: + @echo "Running tests..." + poetry run pytest tests/ + @echo "Tests complete." + + +typecheck: + @echo "Running mypy..." + poetry run mypy --config-file ./mypy.ini avidtools + @echo "mypy complete." + +linting: + @echo "Running ruff check..." + poetry run ruff check avidtools + @echo "ruff check complete" + +# Run linter +lint: typecheck linting + +# Format code +format: + @echo "Formatting code..." + poetry run ruff format . + @echo "Formatting complete." 
+ +# Help command +help: + @echo "Available commands:" + @echo " make setup - Set up the project (initialize submodules and install dependencies)" + @echo " make setup-submodules - Initialize and update submodules" + @echo " make update-submodules - Update submodules" + @echo " make install - Install project dependencies" + @echo " make update - Update project dependencies and submodules" + @echo " make clean - Clean up generated files" + @echo " make test - Run tests" + @echo " make lint - Run linter (Ruff and mypy)" + @echo " make format - Format code using Ruff" + @echo " make help - Show this help message" \ No newline at end of file diff --git a/avidtools/connectors/atlas.py b/avidtools/connectors/atlas.py index d1c99bc..a02a232 100644 --- a/avidtools/connectors/atlas.py +++ b/avidtools/connectors/atlas.py @@ -2,88 +2,84 @@ import yaml from ..datamodels.report import Report -from ..datamodels.components import * +from ..datamodels.components import ( + Affects, + Artifact, + ArtifactTypeEnum, + ClassEnum, + LangValue, + Problemtype, + Reference, + TypeEnum, +) + +ATLAS_HOME = ( + "https://raw.githubusercontent.com/mitre-atlas/atlas-data/main/data/case-studies/" +) -ATLAS_HOME = 'https://raw.githubusercontent.com/mitre-atlas/atlas-data/main/data/case-studies/' def import_case_study(case_study_id): - """Import a case study from the MITRE ATLAS website and return an yaml object. - - Parameters - ---------- - case_study_id : str - Identifier of the case studies to be imported. Has the format AML.CSXXXX - - Returns - -------- - case_study : dict - Dictionary containing the imported case study. + """Import a case study from MITRE ATLAS website and return a yaml object. + + Parameters + ---------- + case_study_id : str + Identifier of the case studies to be imported. + Has the format AML.CSXXXX + + Returns + -------- + case_study : dict + Dictionary containing the imported case study. 
""" - req = requests.get(ATLAS_HOME+case_study_id+'.yaml') + req = requests.get(ATLAS_HOME + case_study_id + ".yaml") case_study = yaml.safe_load(req.content) return case_study - + + def convert_case_study(case_study): """Convert a case study in the ATLAS schema into an AVID report object. - - Parameters - ---------- - case_study : dict - Dictionary containing the imported case study. - - Returns - -------- - report : Report - an AVID report object containing information in the case study. + + Parameters + ---------- + case_study : dict + Dictionary containing the imported case study. + + Returns + -------- + report : Report + an AVID report object containing information in the case study. """ report = Report() - + report.affects = Affects( - developer = [], - deployer = [case_study['target']], - artifacts = [Artifact( - type = ArtifactTypeEnum.system, - name = case_study['target'] - )] - ) - + developer=[], + deployer=[case_study["target"]], + artifacts=[Artifact(type=ArtifactTypeEnum.system, name=case_study["target"])], + ) + report.problemtype = Problemtype( - classof = ClassEnum.atlas, - type = TypeEnum.advisory, - description = LangValue( - lang = 'eng', - value = case_study['name'] - ) + classof=ClassEnum.atlas, + type=TypeEnum.advisory, + description=LangValue(lang="eng", value=case_study["name"]), ) - + report.references = [ Reference( - type = 'source', - label = case_study['name'], - url = 'https://atlas.mitre.org/studies/'+case_study['id'] + type="source", + label=case_study["name"], + url="https://atlas.mitre.org/studies/" + case_study["id"], ) ] + [ - Reference( - type = 'source', - label = ref['title'], - url = ref['url'] - ) - for ref in case_study['references'] + Reference(type="source", label=ref["title"], url=ref["url"]) + for ref in case_study["references"] ] - - report.description = LangValue( - lang = 'eng', - value = case_study['summary'] - ) - - if 'reporter' in list(case_study.keys()): - report.credit = [ - LangValue( - lang = 'eng', - value 
= case_study['reporter'] - ) - ] - - report.reported_date = case_study['incident-date'] - - return report \ No newline at end of file + + report.description = LangValue(lang="eng", value=case_study["summary"]) + + if "reporter" in list(case_study.keys()): + report.credit = [LangValue(lang="eng", value=case_study["reporter"])] + + report.reported_date = case_study["incident-date"] + + return report diff --git a/avidtools/connectors/cve.py b/avidtools/connectors/cve.py index 9e7cfe2..f852641 100644 --- a/avidtools/connectors/cve.py +++ b/avidtools/connectors/cve.py @@ -2,89 +2,78 @@ from datetime import datetime from ..datamodels.vulnerability import Vulnerability -from ..datamodels.components import * +from ..datamodels.components import ( + Affects, + Artifact, + ArtifactTypeEnum, + ClassEnum, + LangValue, + Problemtype, + Reference, + TypeEnum, +) + def import_cve(cve_id): """Import a CVE from the NVD API and return a JSON dump object. - - Parameters - ---------- - cve_id : str - Identifier of the CVE to be imported. Has the format CVE-2XXX-XXXXX - - Returns - -------- - cve: nvdlib.classes.CVE - JSON dump object containing the imported CVE information. + + Parameters + ---------- + cve_id : str + Identifier of the CVE to be imported. Has the format CVE-2XXX-XXXXX + + Returns + -------- + cve: nvdlib.classes.CVE + JSON dump object containing the imported CVE information. """ - cv = nvdlib.searchCVE(cveId = cve_id)[0] + cv = nvdlib.searchCVE(cveId=cve_id)[0] return cv - + + def convert_cve(cve): """Convert a CVE into an AVID report object. - - Parameters - ---------- - cve : nvdlib.classes.CVE - JSON dump object containing the imported CVE information. - - Returns - -------- - vuln : Vulnerability - an AVID vulnerability object containing information in the CVE. + + Parameters + ---------- + cve : nvdlib.classes.CVE + JSON dump object containing the imported CVE information. 
+ + Returns + -------- + vuln : Vulnerability + an AVID vulnerability object containing information in the CVE. """ vuln = Vulnerability() - - aff = [c.criteria.split(':') for c in cve.cpe] + + aff = [c.criteria.split(":") for c in cve.cpe] vuln.affects = Affects( - developer = [a[3] for a in aff], - deployer = [], - artifacts = [ - Artifact( - type = ArtifactTypeEnum.system, - name = ':'.join(a[4:]) - ) - for a in aff - ] - ) - - vuln.problemtype = Problemtype( - classof = ClassEnum.cve, - type = TypeEnum.advisory, - description = LangValue( - lang = 'eng', - value = cve.descriptions[0].value - ) + developer=[a[3] for a in aff], + deployer=[], + artifacts=[ + Artifact(type=ArtifactTypeEnum.system, name=":".join(a[4:])) for a in aff + ], ) - - vuln.references = [ - Reference( - type = 'source', - label = 'NVD entry', - url = cve.url - ) - ] + [ - Reference( - type = 'source', - label = ref.url, - url = ref.url - ) - for ref in cve.references - ] - - vuln.description = LangValue( - lang = 'eng', - value = cve.id + ' Detail' + + vuln.problemtype = Problemtype( + classof=ClassEnum.cve, + type=TypeEnum.advisory, + description=LangValue(lang="eng", value=cve.descriptions[0].value), ) - - vuln.credit = [ - LangValue( - lang = 'eng', - value = cve.sourceIdentifier - ) + + vuln.references = [Reference(type="source", label="NVD entry", url=cve.url)] + [ + Reference(type="source", label=ref.url, url=ref.url) for ref in cve.references ] - - vuln.published_date = datetime.strptime(cve.published.split('T')[0], '%Y-%m-%d').date() - vuln.last_modified_date = datetime.strptime(cve.lastModified.split('T')[0], '%Y-%m-%d').date() - - return vuln \ No newline at end of file + + vuln.description = LangValue(lang="eng", value=cve.id + " Detail") + + vuln.credit = [LangValue(lang="eng", value=cve.sourceIdentifier)] + + vuln.published_date = datetime.strptime( + cve.published.split("T")[0], "%Y-%m-%d" + ).date() + vuln.last_modified_date = datetime.strptime( + 
cve.lastModified.split("T")[0], "%Y-%m-%d" + ).date() + + return vuln diff --git a/avidtools/connectors/inspect.py b/avidtools/connectors/inspect.py index c844fc2..6103827 100644 --- a/avidtools/connectors/inspect.py +++ b/avidtools/connectors/inspect.py @@ -1,38 +1,44 @@ -from typing import List +from typing import List, Any from ..datamodels.report import Report -from ..datamodels.components import * +from ..datamodels.components import ( + Affects, + Artifact, + ArtifactTypeEnum, + Detection, + LangValue, + Metric, + Problemtype, + Reference, +) +from ..datamodels.enums import ClassEnum, MethodEnum, TypeEnum -from inspect_ai.log import read_eval_log, EvalLog +try: + from inspect_ai.log import read_eval_log, EvalLog +except ImportError: + # Handle case where inspect_ai is not installed + def read_eval_log(file_path): + raise ImportError( + "inspect_ai package is required for this functionality" + ) + + # Create a dummy EvalLog class for type hinting + EvalLog = Any human_readable_name = { "openai": "OpenAI", - "hf": "HuggingFace", "anthropic": "Anthropic", "google": "Google", - "mistral": "Mistral", - "X AI": "Grok", - "meta": "Meta", + "huggingface": "Hugging Face", + "meta-llama": "Meta", + "mistralai": "Mistral AI", "cohere": "Cohere", - "perplexity": "Perplexity AI", - "stability": "Stability AI", - "nvidia": "NVIDIA", - "ibm": "IBM Watson", - "mosaic": "MosaicML", - "databricks": "Databricks", - "cerebras": "Cerebras Systems", - "alibaba": "Alibaba Cloud", - "baidu": "Baidu AI", - "tencent": "Tencent AI", - "together": "Together AI", - "deepseek": "Deepseek AI", } -def import_eval_log(file_path: str) -> EvalLog: - """ - Import an Inspect evaluation log file and return an evaluation log object. +def import_eval_log(file_path: str) -> Any: + """Import an Inspect evaluation log from a file. 
Parameters ---------- @@ -46,9 +52,9 @@ def import_eval_log(file_path: str) -> EvalLog: """ return read_eval_log(file_path) + def convert_eval_log(file_path: str) -> List[Report]: - """ - Convert an Inspect evaluation log into a list of AVID Report objects. + """Convert an Inspect evaluation log into a list of AVID Report objects. Parameters ---------- @@ -65,42 +71,72 @@ def convert_eval_log(file_path: str) -> List[Report]: for sample in eval_log.samples: report = Report() - developer_name = human_readable_name[eval_log.eval.model.split("/", 1)[0]] + model_prefix = eval_log.eval.model.split("/", 1)[0] + developer_name = human_readable_name[model_prefix] task = eval_log.eval.task.rsplit("/", 1)[-1] model_name = eval_log.eval.model.rsplit("/", 1)[-1] - report.affects = Affect(developer=[developer_name], - deployer=[eval_log.eval.model], - artifact=Artifact(type=ArtifactTypeEnum.model.value, - name=model_name)) - - report.problemtype = ProblemType(classof=ClassEnum.llm.value, - type=TypeEnum.measurement.value, - description={"lang": 'eng', - "value": f"Evaluation of the LLM {model_name} on the {task} benchmark using Inspect Evals"}) + report.affects = Affects( + developer=[developer_name], + deployer=[eval_log.eval.model], + artifacts=[Artifact(type=ArtifactTypeEnum.model, name=model_name)], + ) + description_value = ( + f"Evaluation of the LLM {model_name} on the {task} " + f"benchmark using Inspect Evals" + ) + report.problemtype = Problemtype( + classof=ClassEnum.llm, + type=TypeEnum.measurement, + description=LangValue(lang="eng", value=description_value), + ) + + dataset_label = ( + f"Inspect Evaluation Log for dataset: {eval_log.eval.dataset.name}" + ) report.references = [ Reference( - type='source', - label=f"Inspect Evaluation Log for dataset: {eval_log.eval.dataset.name}", - url=eval_log.eval.dataset.location + type="source", + label=dataset_label, + url=eval_log.eval.dataset.location, ) ] - - metrics = ', '.join([metric.name.rsplit('/', 1)[-1] for scorer 
in eval_log.eval.scorers for metric in scorer.metrics]) - scorer_desc = '|'.join([f"scorer: {scorer.name}, metrics: {metrics}" for scorer in eval_log.eval.scorers]) + + metrics = ", ".join( + [ + metric.name.rsplit("/", 1)[-1] + for scorer in eval_log.eval.scorers + for metric in scorer.metrics + ] + ) + scorer_desc = "|".join( + [ + f"scorer: {scorer.name}, metrics: {metrics}" + for scorer in eval_log.eval.scorers + ] + ) report.metrics = [] for sc in eval_log.results.scores: for k, v in sc.metrics.items(): - report.metrics.append({"scorer": sc.name, "metrics": k, "value": v.value}) - - report.description = { - "lang": 'eng', - "value": f"Evaluation of the LLM {model_name} on the {task} benchmark using Inspect Evals" - f"\n\nSample input: {sample.input}\n\n" - f"Model output: {sample.output}\n\n" - f"Scorer: {scorer_desc}\n\n" - f"Score: {sample.score}" - } + report.metrics.append( + Metric( + name=k, + detection_method=Detection( + type=MethodEnum.test, name=sc.name + ), + results={"value": v.value, "scorer": sc.name}, + ) + ) + + full_description = ( + f"Evaluation of the LLM {model_name} on the {task} " + f"benchmark using Inspect Evals\n\n" + f"Sample input: {sample.input}\n\n" + f"Model output: {sample.output}\n\n" + f"Scorer: {scorer_desc}\n\n" + f"Score: {sample.score}" + ) + report.description = LangValue(lang="eng", value=full_description) reports.append(report) diff --git a/avidtools/datamodels/components.py b/avidtools/datamodels/components.py index f7afd0a..bfcd02d 100644 --- a/avidtools/datamodels/components.py +++ b/avidtools/datamodels/components.py @@ -1,64 +1,95 @@ """ Component data classes used in AVID report and vulnerability datamodels. 
""" + from typing import Dict, List, Optional from pydantic import BaseModel -from .enums import * +from .enums import ( + ArtifactTypeEnum, + ClassEnum, + LifecycleEnum, + MethodEnum, + SepEnum, + TypeEnum, +) + class LangValue(BaseModel): """Generic class to store a string with its language specified.""" + lang: str value: str + class Artifact(BaseModel): """Type and name of an affected artifact.""" + type: ArtifactTypeEnum name: str + class Detection(BaseModel): """Method to detect a specific issue.""" + type: MethodEnum name: str + class Affects(BaseModel): """Information on Artifact(s) affected by this report.""" + developer: List[str] deployer: List[str] artifacts: List[Artifact] - + + class Problemtype(BaseModel): """Description of the problem a report/vuln is concerned with.""" + classof: ClassEnum - type: Optional[TypeEnum] + type: Optional[TypeEnum] = None description: LangValue - + + class Metric(BaseModel): """Quantification of the issue in a specific report.""" + name: str detection_method: Detection results: Dict + class Reference(BaseModel): """Details for a reference of a report/vulnerability.""" - type: Optional[str] + + type: Optional[str] = None label: str - url: str # AnyUrl is a better fit, but keeping this because submissions are not standard yet + # AnyUrl is a better fit, but keeping this because submissions + # are not standard yet + url: str + + class Config: # type is excluded if None + fields = {"type": {"exclude": True}} - class Config: # type is excluded if None - fields = {'type': {'exclude': True}} class AvidTaxonomy(BaseModel): """AVID taxonomy mappings of a report/vulnerability.""" - vuln_id: Optional[str] + + vuln_id: Optional[str] = None risk_domain: List[str] sep_view: List[SepEnum] lifecycle_view: List[LifecycleEnum] taxonomy_version: str - class Config: # vuln_id is excluded if None - fields = {'vuln_id': {'exclude': True}} - + class Config: # vuln_id is excluded if None + fields = {"vuln_id": {"exclude": True}} + + class 
Impact(BaseModel): - """Impact information of a report/vulnerability, e.g. different taxonomy mappings, harm and severity scores.""" - avid: AvidTaxonomy \ No newline at end of file + """Impact information of a report/vulnerability. + + E.g. different taxonomy mappings, harm and severity scores. + """ + + avid: AvidTaxonomy diff --git a/avidtools/datamodels/enums.py b/avidtools/datamodels/enums.py index a8c832b..df49c76 100644 --- a/avidtools/datamodels/enums.py +++ b/avidtools/datamodels/enums.py @@ -1,90 +1,103 @@ """ Enumerations used in AVID report and vulnerability datamodels. """ + from enum import Enum + class ArtifactTypeEnum(str, Enum): """Whether the artifact is a dataset, model, or system.""" - dataset = 'Dataset' - model = 'Model' - system = 'System' - + + dataset = "Dataset" + model = "Model" + system = "System" + + class SepEnum(str, Enum): """All (sub)categories of the SEP view of the AVID taxonomy.""" - S0100 = 'S0100: Software Vulnerability' - S0200 = 'S0200: Supply Chain Compromise' - S0201 = 'S0201: Model Compromise' - S0202 = 'S0202: Software Compromise' - S0300 = 'S0300: Over-permissive API' - S0301 = 'S0301: Information Leak' - S0302 = 'S0302: Excessive Queries' - S0400 = 'S0400: Model Bypass' - S0401 = 'S0401: Bad Features' - S0402 = 'S0402: Insufficient Training Data' - S0403 = 'S0403: Adversarial Example' - S0500 = 'S0500: Exfiltration' - S0501 = 'S0501: Model inversion' - S0502 = 'S0502: Model theft' - S0600 = 'S0600: Data Poisoning' - S0601 = 'S0601: Ingest Poisoning' - E0100 = 'E0100: Bias/ Discrimination' - E0101 = 'E0101: Group fairness' - E0102 = 'E0102: Individual fairness' - E0200 = 'E0200: Explainability' - E0201 = 'E0201: Global explanations' - E0202 = 'E0202: Local explanations' - E0300 = 'E0300: User actions' - E0301 = 'E0301: Toxicity' - E0302 = 'E0302: Polarization/ Exclusion' - E0400 = 'E0400: Misinformation' - E0401 = 'E0401: Deliberative Misinformation' - E0402 = 'E0402: Generative Misinformation' - P0100 = 'P0100: Data 
issues' - P0101 = 'P0101: Data drift' - P0102 = 'P0102: Concept drift' - P0103 = 'P0103: Data entanglement' - P0104 = 'P0104: Data quality issues' - P0105 = 'P0105: Feedback loops' - P0200 = 'P0200: Model issues' - P0201 = 'P0201: Resilience/ Stability' - P0202 = 'P0202: OOD generalization' - P0203 = 'P0203: Scaling' - P0204 = 'P0204: Accuracy' - P0300 = 'P0300: Privacy' - P0301 = 'P0301: Anonymization' - P0302 = 'P0302: Randomization' - P0303 = 'P0303: Encryption' - P0400 = 'P0400: Safety' - P0401 = 'P0401: Psychological Safety' - P0402 = 'P0402: Physical safety' - P0403 = 'P0403: Socioeconomic safety' - P0404 = 'P0404: Environmental safety' - + + S0100 = "S0100: Software Vulnerability" + S0200 = "S0200: Supply Chain Compromise" + S0201 = "S0201: Model Compromise" + S0202 = "S0202: Software Compromise" + S0300 = "S0300: Over-permissive API" + S0301 = "S0301: Information Leak" + S0302 = "S0302: Excessive Queries" + S0400 = "S0400: Model Bypass" + S0401 = "S0401: Bad Features" + S0402 = "S0402: Insufficient Training Data" + S0403 = "S0403: Adversarial Example" + S0500 = "S0500: Exfiltration" + S0501 = "S0501: Model inversion" + S0502 = "S0502: Model theft" + S0600 = "S0600: Data Poisoning" + S0601 = "S0601: Ingest Poisoning" + E0100 = "E0100: Bias/ Discrimination" + E0101 = "E0101: Group fairness" + E0102 = "E0102: Individual fairness" + E0200 = "E0200: Explainability" + E0201 = "E0201: Global explanations" + E0202 = "E0202: Local explanations" + E0300 = "E0300: User actions" + E0301 = "E0301: Toxicity" + E0302 = "E0302: Polarization/ Exclusion" + E0400 = "E0400: Misinformation" + E0401 = "E0401: Deliberative Misinformation" + E0402 = "E0402: Generative Misinformation" + P0100 = "P0100: Data issues" + P0101 = "P0101: Data drift" + P0102 = "P0102: Concept drift" + P0103 = "P0103: Data entanglement" + P0104 = "P0104: Data quality issues" + P0105 = "P0105: Feedback loops" + P0200 = "P0200: Model issues" + P0201 = "P0201: Resilience/ Stability" + P0202 = "P0202: OOD 
generalization" + P0203 = "P0203: Scaling" + P0204 = "P0204: Accuracy" + P0300 = "P0300: Privacy" + P0301 = "P0301: Anonymization" + P0302 = "P0302: Randomization" + P0303 = "P0303: Encryption" + P0400 = "P0400: Safety" + P0401 = "P0401: Psychological Safety" + P0402 = "P0402: Physical safety" + P0403 = "P0403: Socioeconomic safety" + P0404 = "P0404: Environmental safety" + + class LifecycleEnum(str, Enum): """All (sub)categories of the lifecycle view of the AVID taxonomy.""" - L01 = 'L01: Business Understanding' - L02 = 'L02: Data Understanding' - L03 = 'L03: Data Preparation' - L04 = 'L04: Model Development' - L05 = 'L05: Evaluation' - L06 = 'L06: Deployment' + + L01 = "L01: Business Understanding" + L02 = "L02: Data Understanding" + L03 = "L03: Data Preparation" + L04 = "L04: Model Development" + L05 = "L05: Evaluation" + L06 = "L06: Deployment" + class ClassEnum(str, Enum): """All report/vulnerability classes.""" - aiid = 'AIID Incident' - atlas = 'ATLAS Case Study' - cve = 'CVE Entry' - llm = 'LLM Evaluation', - na = 'Undefined' - + + aiid = "AIID Incident" + atlas = "ATLAS Case Study" + cve = "CVE Entry" + llm = "LLM Evaluation" + na = "Undefined" + + class TypeEnum(str, Enum): """All report/vulnerability types.""" - issue = 'Issue' - advisory = 'Advisory' - measurement = 'Measurement' - detection = 'Detection' - + + issue = "Issue" + advisory = "Advisory" + measurement = "Measurement" + detection = "Detection" + + class MethodEnum(str, Enum): """The values a detection method can take.""" - test = 'Significance Test' - thres = 'Static Threshold' \ No newline at end of file + + test = "Significance Test" + thres = "Static Threshold" diff --git a/avidtools/datamodels/report.py b/avidtools/datamodels/report.py index 0aeee2a..23db839 100644 --- a/avidtools/datamodels/report.py +++ b/avidtools/datamodels/report.py @@ -2,59 +2,63 @@ Class definitions for AVID report. 
""" + from pydantic import BaseModel -from typing import List +from typing import List, Optional from datetime import date from .components import Affects, Problemtype, Metric, Reference, LangValue, Impact + class ReportMetadata(BaseModel): """Metadata class for a report.""" + report_id: str + class Report(BaseModel): """Top-level class to store an AVID report.""" - data_type: str = 'AVID' + data_type: str = "AVID" """Namespace for the report. Set to AVID by default, change this only if you're adopting these datamodels to stand up your own vulnerability database.""" - data_version: str = None + data_version: Optional[str] = None """Latest version of the data.""" - - metadata: ReportMetadata = None + + metadata: Optional[ReportMetadata] = None """Metadata for the report.""" - - affects: Affects = None + + affects: Optional[Affects] = None """Information on Artifact(s) affected by this report.""" - - problemtype: Problemtype = None + + problemtype: Optional[Problemtype] = None """Description of the problem a report is concerned with.""" - - metrics: List[Metric] = None + + metrics: Optional[List[Metric]] = None """Quantitative results pertaining to the issues raised in a specific report.""" - - references: List[Reference] = None + + references: Optional[List[Reference]] = None """References and their details.""" - - description: LangValue = None + + description: Optional[LangValue] = None """High-level description.""" - - impact: Impact = None + + impact: Optional[Impact] = None """Impact information, e.g. different taxonomy mappings, harm and severity scores.""" - - credit: List[LangValue] = None + + credit: Optional[List[LangValue]] = None """People credited for this report.""" - - reported_date: date = None + + reported_date: Optional[date] = None """Date reported.""" - + def save(self, location): """Save a report as a json file. - + Parameters ---------- location : str output *.json filename including location. 
""" with open(location, "w") as outfile: - outfile.write(self.json()) \ No newline at end of file + outfile.write(self.json()) diff --git a/avidtools/datamodels/vulnerability.py b/avidtools/datamodels/vulnerability.py index 10b5911..1707843 100644 --- a/avidtools/datamodels/vulnerability.py +++ b/avidtools/datamodels/vulnerability.py @@ -2,66 +2,72 @@ Class definitions for AVID vulnerability. """ + from pydantic import BaseModel -from typing import List +from typing import List, Optional from datetime import date from .components import Affects, AvidTaxonomy, Problemtype, Reference, LangValue, Impact from .enums import TypeEnum from .report import Report + class VulnMetadata(BaseModel): """Metadata class for a vulnerability.""" + vuln_id: str + class ReportSummary(BaseModel): """Summary of a report connected to a vuln.""" + report_id: str type: TypeEnum name: str + class Vulnerability(BaseModel): """Top-level class to store an AVID vulnerability.""" - data_type: str = 'AVID' + data_type: str = "AVID" """Namespace for the report. 
Set to AVID by default, change this only if you're adopting these datamodels to stand up your own vulnerability database.""" - data_version: str = None + data_version: Optional[str] = None """Latest version of the data.""" - metadata: VulnMetadata = None + metadata: Optional[VulnMetadata] = None """Metadata for the vuln.""" - affects: Affects = None + affects: Optional[Affects] = None """Information on Artifact(s) affected by this report.""" - - problemtype: Problemtype = None + + problemtype: Optional[Problemtype] = None """Description of the problem a report is concerned with.""" - - references: List[Reference] = None + + references: Optional[List[Reference]] = None """References and their details.""" - description: LangValue = None + description: Optional[LangValue] = None """High-level description.""" - reports: List[ReportSummary] = None + reports: Optional[List[ReportSummary]] = None """Brief summary of all reports connected to a vuln.""" - impact: Impact = None + impact: Optional[Impact] = None """Impact information, e.g. different taxonomy mappings, harm and severity scores.""" - credit: List[LangValue] = None + credit: Optional[List[LangValue]] = None """People credited for this vuln.""" - published_date: date = None + published_date: Optional[date] = None """Date published.""" - last_modified_date: date = None + last_modified_date: Optional[date] = None """Date last modified.""" - + def save(self, location): """Save a report as a json file. 
- + Parameters ---------- location : str @@ -69,7 +75,7 @@ def save(self, location): """ with open(location, "w") as outfile: outfile.write(self.json()) - + def ingest(self, report: Report): self.data_version = report.data_version self.affects = report.affects @@ -82,10 +88,10 @@ def ingest(self, report: Report): self.last_modified_date = date.today() if self.impact is not None: - if self.impact.avid is not None: # delete vuln_id field from report + if self.impact.avid is not None: # delete vuln_id field from report self.impact.avid = AvidTaxonomy( - risk_domain = self.impact.avid.risk_domain, - sep_view = self.impact.avid.sep_view, - lifecycle_view = self.impact.avid.lifecycle_view, - taxonomy_version = self.impact.avid.taxonomy_version + risk_domain=self.impact.avid.risk_domain, + sep_view=self.impact.avid.sep_view, + lifecycle_view=self.impact.avid.lifecycle_view, + taxonomy_version=self.impact.avid.taxonomy_version, ) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 0000000..901eae7 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,15 @@ +# Global options: + +[mypy] +warn_return_any = True +warn_unused_configs = True +ignore_missing_imports = True +python_version = 3.12 + +# Per-module options: + +[mypy-avidtools.*] +disallow_untyped_defs = False + +[mypy-tests.*] +ignore_errors = True \ No newline at end of file diff --git a/notebooks/API_Tutorial.ipynb b/notebooks/API_Tutorial.ipynb index 2c859ab..72dc6f3 100644 --- a/notebooks/API_Tutorial.ipynb +++ b/notebooks/API_Tutorial.ipynb @@ -479,7 +479,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "base", "language": "python", "name": "python3" }, @@ -493,7 +493,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.12" + "version": "3.11.5" } }, "nbformat": 4, diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..1f2bc7b --- /dev/null +++ b/poetry.lock @@ -0,0 +1,788 @@ +# This file is 
automatically @generated by Poetry 2.1.1 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "certifi" +version = "2025.6.15" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057"}, + {file = "certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, + {file = 
"charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, + {file = 
"charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", 
hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash 
= "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, + {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, + {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\"" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coverage" +version = "7.9.1" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "coverage-7.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cc94d7c5e8423920787c33d811c0be67b7be83c705f001f7180c7b186dcf10ca"}, + {file = "coverage-7.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16aa0830d0c08a2c40c264cef801db8bc4fc0e1892782e45bcacbd5889270509"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf95981b126f23db63e9dbe4cf65bd71f9a6305696fa5e2262693bc4e2183f5b"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f05031cf21699785cd47cb7485f67df619e7bcdae38e0fde40d23d3d0210d3c3"}, + {file = "coverage-7.9.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb4fbcab8764dc072cb651a4bcda4d11fb5658a1d8d68842a862a6610bd8cfa3"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0f16649a7330ec307942ed27d06ee7e7a38417144620bb3d6e9a18ded8a2d3e5"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cea0a27a89e6432705fffc178064503508e3c0184b4f061700e771a09de58187"}, + {file = "coverage-7.9.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e980b53a959fa53b6f05343afbd1e6f44a23ed6c23c4b4c56c6662bbb40c82ce"}, + {file = "coverage-7.9.1-cp310-cp310-win32.whl", hash = 
"sha256:70760b4c5560be6ca70d11f8988ee6542b003f982b32f83d5ac0b72476607b70"}, + {file = "coverage-7.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:a66e8f628b71f78c0e0342003d53b53101ba4e00ea8dabb799d9dba0abbbcebe"}, + {file = "coverage-7.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95c765060e65c692da2d2f51a9499c5e9f5cf5453aeaf1420e3fc847cc060582"}, + {file = "coverage-7.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ba383dc6afd5ec5b7a0d0c23d38895db0e15bcba7fb0fa8901f245267ac30d86"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37ae0383f13cbdcf1e5e7014489b0d71cc0106458878ccde52e8a12ced4298ed"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69aa417a030bf11ec46149636314c24c8d60fadb12fc0ee8f10fda0d918c879d"}, + {file = "coverage-7.9.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a4be2a28656afe279b34d4f91c3e26eccf2f85500d4a4ff0b1f8b54bf807338"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:382e7ddd5289f140259b610e5f5c58f713d025cb2f66d0eb17e68d0a94278875"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e5532482344186c543c37bfad0ee6069e8ae4fc38d073b8bc836fc8f03c9e250"}, + {file = "coverage-7.9.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a39d18b3f50cc121d0ce3838d32d58bd1d15dab89c910358ebefc3665712256c"}, + {file = "coverage-7.9.1-cp311-cp311-win32.whl", hash = "sha256:dd24bd8d77c98557880def750782df77ab2b6885a18483dc8588792247174b32"}, + {file = "coverage-7.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:6b55ad10a35a21b8015eabddc9ba31eb590f54adc9cd39bcf09ff5349fd52125"}, + {file = "coverage-7.9.1-cp311-cp311-win_arm64.whl", hash = "sha256:6ad935f0016be24c0e97fc8c40c465f9c4b85cbbe6eac48934c0dc4d2568321e"}, + {file = "coverage-7.9.1-cp312-cp312-macosx_10_13_x86_64.whl", 
hash = "sha256:a8de12b4b87c20de895f10567639c0797b621b22897b0af3ce4b4e204a743626"}, + {file = "coverage-7.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5add197315a054e92cee1b5f686a2bcba60c4c3e66ee3de77ace6c867bdee7cb"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:600a1d4106fe66f41e5d0136dfbc68fe7200a5cbe85610ddf094f8f22e1b0300"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a876e4c3e5a2a1715a6608906aa5a2e0475b9c0f68343c2ada98110512ab1d8"}, + {file = "coverage-7.9.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81f34346dd63010453922c8e628a52ea2d2ccd73cb2487f7700ac531b247c8a5"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:888f8eee13f2377ce86d44f338968eedec3291876b0b8a7289247ba52cb984cd"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9969ef1e69b8c8e1e70d591f91bbc37fc9a3621e447525d1602801a24ceda898"}, + {file = "coverage-7.9.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:60c458224331ee3f1a5b472773e4a085cc27a86a0b48205409d364272d67140d"}, + {file = "coverage-7.9.1-cp312-cp312-win32.whl", hash = "sha256:5f646a99a8c2b3ff4c6a6e081f78fad0dde275cd59f8f49dc4eab2e394332e74"}, + {file = "coverage-7.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:30f445f85c353090b83e552dcbbdad3ec84c7967e108c3ae54556ca69955563e"}, + {file = "coverage-7.9.1-cp312-cp312-win_arm64.whl", hash = "sha256:af41da5dca398d3474129c58cb2b106a5d93bbb196be0d307ac82311ca234342"}, + {file = "coverage-7.9.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:31324f18d5969feef7344a932c32428a2d1a3e50b15a6404e97cba1cc9b2c631"}, + {file = "coverage-7.9.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0c804506d624e8a20fb3108764c52e0eef664e29d21692afa375e0dd98dc384f"}, + {file = 
"coverage-7.9.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef64c27bc40189f36fcc50c3fb8f16ccda73b6a0b80d9bd6e6ce4cffcd810bbd"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4fe2348cc6ec372e25adec0219ee2334a68d2f5222e0cba9c0d613394e12d86"}, + {file = "coverage-7.9.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34ed2186fe52fcc24d4561041979a0dec69adae7bce2ae8d1c49eace13e55c43"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:25308bd3d00d5eedd5ae7d4357161f4df743e3c0240fa773ee1b0f75e6c7c0f1"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73e9439310f65d55a5a1e0564b48e34f5369bee943d72c88378f2d576f5a5751"}, + {file = "coverage-7.9.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ab6be0859141b53aa89412a82454b482c81cf750de4f29223d52268a86de67"}, + {file = "coverage-7.9.1-cp313-cp313-win32.whl", hash = "sha256:64bdd969456e2d02a8b08aa047a92d269c7ac1f47e0c977675d550c9a0863643"}, + {file = "coverage-7.9.1-cp313-cp313-win_amd64.whl", hash = "sha256:be9e3f68ca9edb897c2184ad0eee815c635565dbe7a0e7e814dc1f7cbab92c0a"}, + {file = "coverage-7.9.1-cp313-cp313-win_arm64.whl", hash = "sha256:1c503289ffef1d5105d91bbb4d62cbe4b14bec4d13ca225f9c73cde9bb46207d"}, + {file = "coverage-7.9.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0b3496922cb5f4215bf5caaef4cf12364a26b0be82e9ed6d050f3352cf2d7ef0"}, + {file = "coverage-7.9.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9565c3ab1c93310569ec0d86b017f128f027cab0b622b7af288696d7ed43a16d"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2241ad5dbf79ae1d9c08fe52b36d03ca122fb9ac6bca0f34439e99f8327ac89f"}, + {file = 
"coverage-7.9.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bb5838701ca68b10ebc0937dbd0eb81974bac54447c55cd58dea5bca8451029"}, + {file = "coverage-7.9.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b30a25f814591a8c0c5372c11ac8967f669b97444c47fd794926e175c4047ece"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2d04b16a6062516df97969f1ae7efd0de9c31eb6ebdceaa0d213b21c0ca1a683"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7931b9e249edefb07cd6ae10c702788546341d5fe44db5b6108a25da4dca513f"}, + {file = "coverage-7.9.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52e92b01041151bf607ee858e5a56c62d4b70f4dac85b8c8cb7fb8a351ab2c10"}, + {file = "coverage-7.9.1-cp313-cp313t-win32.whl", hash = "sha256:684e2110ed84fd1ca5f40e89aa44adf1729dc85444004111aa01866507adf363"}, + {file = "coverage-7.9.1-cp313-cp313t-win_amd64.whl", hash = "sha256:437c576979e4db840539674e68c84b3cda82bc824dd138d56bead1435f1cb5d7"}, + {file = "coverage-7.9.1-cp313-cp313t-win_arm64.whl", hash = "sha256:18a0912944d70aaf5f399e350445738a1a20b50fbea788f640751c2ed9208b6c"}, + {file = "coverage-7.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6f424507f57878e424d9a95dc4ead3fbdd72fd201e404e861e465f28ea469951"}, + {file = "coverage-7.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:535fde4001b2783ac80865d90e7cc7798b6b126f4cd8a8c54acfe76804e54e58"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02532fd3290bb8fa6bec876520842428e2a6ed6c27014eca81b031c2d30e3f71"}, + {file = "coverage-7.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56f5eb308b17bca3bbff810f55ee26d51926d9f89ba92707ee41d3c061257e55"}, + {file = 
"coverage-7.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfa447506c1a52271f1b0de3f42ea0fa14676052549095e378d5bff1c505ff7b"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9ca8e220006966b4a7b68e8984a6aee645a0384b0769e829ba60281fe61ec4f7"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:49f1d0788ba5b7ba65933f3a18864117c6506619f5ca80326b478f72acf3f385"}, + {file = "coverage-7.9.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:68cd53aec6f45b8e4724c0950ce86eacb775c6be01ce6e3669fe4f3a21e768ed"}, + {file = "coverage-7.9.1-cp39-cp39-win32.whl", hash = "sha256:95335095b6c7b1cc14c3f3f17d5452ce677e8490d101698562b2ffcacc304c8d"}, + {file = "coverage-7.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:e1b5191d1648acc439b24721caab2fd0c86679d8549ed2c84d5a7ec1bedcc244"}, + {file = "coverage-7.9.1-pp39.pp310.pp311-none-any.whl", hash = "sha256:db0f04118d1db74db6c9e1cb1898532c7dcc220f1d2718f058601f7c3f499514"}, + {file = "coverage-7.9.1-py3-none-any.whl", hash = "sha256:66b974b145aa189516b6bf2d8423e888b742517d37872f6ee4c5be0073bd9a3c"}, + {file = "coverage-7.9.1.tar.gz", hash = "sha256:6cf43c78c4282708a28e466316935ec7489a9c487518a77fa68f716c67909cec"}, +] + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main", "dev"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple 
config-ini parsing" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "mypy" +version = "1.16.1" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "mypy-1.16.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b4f0fed1022a63c6fec38f28b7fc77fca47fd490445c69d0a66266c59dd0b88a"}, + {file = "mypy-1.16.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:86042bbf9f5a05ea000d3203cf87aa9d0ccf9a01f73f71c58979eb9249f46d72"}, + {file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ea7469ee5902c95542bea7ee545f7006508c65c8c54b06dc2c92676ce526f3ea"}, + {file = "mypy-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:352025753ef6a83cb9e7f2427319bb7875d1fdda8439d1e23de12ab164179574"}, + {file = "mypy-1.16.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ff9fa5b16e4c1364eb89a4d16bcda9987f05d39604e1e6c35378a2987c1aac2d"}, + {file = "mypy-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:1256688e284632382f8f3b9e2123df7d279f603c561f099758e66dd6ed4e8bd6"}, + {file = "mypy-1.16.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:472e4e4c100062488ec643f6162dd0d5208e33e2f34544e1fc931372e806c0cc"}, + {file = "mypy-1.16.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea16e2a7d2714277e349e24d19a782a663a34ed60864006e8585db08f8ad1782"}, + {file = "mypy-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08e850ea22adc4d8a4014651575567b0318ede51e8e9fe7a68f25391af699507"}, + {file = 
"mypy-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22d76a63a42619bfb90122889b903519149879ddbf2ba4251834727944c8baca"}, + {file = "mypy-1.16.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2c7ce0662b6b9dc8f4ed86eb7a5d505ee3298c04b40ec13b30e572c0e5ae17c4"}, + {file = "mypy-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:211287e98e05352a2e1d4e8759c5490925a7c784ddc84207f4714822f8cf99b6"}, + {file = "mypy-1.16.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:af4792433f09575d9eeca5c63d7d90ca4aeceda9d8355e136f80f8967639183d"}, + {file = "mypy-1.16.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66df38405fd8466ce3517eda1f6640611a0b8e70895e2a9462d1d4323c5eb4b9"}, + {file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44e7acddb3c48bd2713994d098729494117803616e116032af192871aed80b79"}, + {file = "mypy-1.16.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ab5eca37b50188163fa7c1b73c685ac66c4e9bdee4a85c9adac0e91d8895e15"}, + {file = "mypy-1.16.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb6229b2c9086247e21a83c309754b9058b438704ad2f6807f0d8227f6ebdd"}, + {file = "mypy-1.16.1-cp312-cp312-win_amd64.whl", hash = "sha256:1f0435cf920e287ff68af3d10a118a73f212deb2ce087619eb4e648116d1fe9b"}, + {file = "mypy-1.16.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ddc91eb318c8751c69ddb200a5937f1232ee8efb4e64e9f4bc475a33719de438"}, + {file = "mypy-1.16.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:87ff2c13d58bdc4bbe7dc0dedfe622c0f04e2cb2a492269f3b418df2de05c536"}, + {file = "mypy-1.16.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a7cfb0fe29fe5a9841b7c8ee6dffb52382c45acdf68f032145b75620acfbd6f"}, + {file = "mypy-1.16.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:051e1677689c9d9578b9c7f4d206d763f9bbd95723cd1416fad50db49d52f359"}, + {file = "mypy-1.16.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d5d2309511cc56c021b4b4e462907c2b12f669b2dbeb68300110ec27723971be"}, + {file = "mypy-1.16.1-cp313-cp313-win_amd64.whl", hash = "sha256:4f58ac32771341e38a853c5d0ec0dfe27e18e27da9cdb8bbc882d2249c71a3ee"}, + {file = "mypy-1.16.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7fc688329af6a287567f45cc1cefb9db662defeb14625213a5b7da6e692e2069"}, + {file = "mypy-1.16.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e198ab3f55924c03ead626ff424cad1732d0d391478dfbf7bb97b34602395da"}, + {file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09aa4f91ada245f0a45dbc47e548fd94e0dd5a8433e0114917dc3b526912a30c"}, + {file = "mypy-1.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13c7cd5b1cb2909aa318a90fd1b7e31f17c50b242953e7dd58345b2a814f6383"}, + {file = "mypy-1.16.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:58e07fb958bc5d752a280da0e890c538f1515b79a65757bbdc54252ba82e0b40"}, + {file = "mypy-1.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:f895078594d918f93337a505f8add9bd654d1a24962b4c6ed9390e12531eb31b"}, + {file = "mypy-1.16.1-py3-none-any.whl", hash = "sha256:5fc2ac4027d0ef28d6ba69a0343737a23c4d1b83672bf38d1fe237bdc0643b37"}, + {file = "mypy-1.16.1.tar.gz", hash = "sha256:6bd00a0a2094841c5e47e7374bb42b83d64c527a502e3334e1173a0c24437bab"}, +] + +[package.dependencies] +mypy_extensions = ">=1.0.0" +pathspec = ">=0.9.0" +typing_extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, +] + +[[package]] +name = "nvdlib" +version = "0.8.1" +description = "National Vulnerability Database CPE/CVE API Library for Python" +optional = false +python-versions = ">=3.11.0" +groups = ["main"] +files = [ + {file = "nvdlib-0.8.1-py3-none-any.whl", hash = "sha256:4f92a4ca242310cf2099d594bbe910ac8009a8adde0641d0e58f90c1c83ffed9"}, +] + +[package.dependencies] +requests = "*" + +[package.extras] +dev = ["pytest (==7.0.1)", "responses (==0.18.0)"] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "pydantic" +version = "2.11.7" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.33.2" +typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = 
"pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = 
"pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = 
"pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = 
"pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash 
= "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, + {file = 
"pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = 
"pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.19.2" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pytest" +version = "8.4.1" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7"}, + {file = "pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c"}, +] + +[package.dependencies] +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +iniconfig = ">=1" +packaging = ">=20" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-cov" +version = "4.1.0" +description = "Pytest plugin for measuring coverage." 
+optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, + {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, +] + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] + +[[package]] +name = "pytest-mock" +version = "3.14.1" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0"}, + {file = "pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file 
= "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = 
"PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "requests" +version = "2.32.4" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, + {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset_normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "responses" +version = "0.24.1" +description = "A utility library for mocking out the `requests` Python library." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "responses-0.24.1-py3-none-any.whl", hash = "sha256:a2b43f4c08bfb9c9bd242568328c65a34b318741d3fab884ac843c5ceeb543f9"}, + {file = "responses-0.24.1.tar.gz", hash = "sha256:b127c6ca3f8df0eb9cc82fd93109a3007a86acb24871834c47b77765152ecf8c"}, +] + +[package.dependencies] +pyyaml = "*" +requests = ">=2.30.0,<3.0" +urllib3 = ">=1.25.10,<3.0" + +[package.extras] +tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli ; python_version < \"3.11\"", "tomli-w", "types-PyYAML", "types-requests"] + +[[package]] +name = "ruff" +version = "0.11.13" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "ruff-0.11.13-py3-none-linux_armv6l.whl", hash = "sha256:4bdfbf1240533f40042ec00c9e09a3aade6f8c10b6414cf11b519488d2635d46"}, + {file = "ruff-0.11.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aef9c9ed1b5ca28bb15c7eac83b8670cf3b20b478195bd49c8d756ba0a36cf48"}, + {file = "ruff-0.11.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53b15a9dfdce029c842e9a5aebc3855e9ab7771395979ff85b7c1dedb53ddc2b"}, + {file = "ruff-0.11.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab153241400789138d13f362c43f7edecc0edfffce2afa6a68434000ecd8f69a"}, + {file = "ruff-0.11.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c51f93029d54a910d3d24f7dd0bb909e31b6cd989a5e4ac513f4eb41629f0dc"}, + {file = "ruff-0.11.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1808b3ed53e1a777c2ef733aca9051dc9bf7c99b26ece15cb59a0320fbdbd629"}, + {file = "ruff-0.11.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d28ce58b5ecf0f43c1b71edffabe6ed7f245d5336b17805803312ec9bc665933"}, + {file = "ruff-0.11.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55e4bc3a77842da33c16d55b32c6cac1ec5fb0fbec9c8c513bdce76c4f922165"}, + {file = "ruff-0.11.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:633bf2c6f35678c56ec73189ba6fa19ff1c5e4807a78bf60ef487b9dd272cc71"}, + {file = "ruff-0.11.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ffbc82d70424b275b089166310448051afdc6e914fdab90e08df66c43bb5ca9"}, + {file = "ruff-0.11.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a9ddd3ec62a9a89578c85842b836e4ac832d4a2e0bfaad3b02243f930ceafcc"}, + {file = "ruff-0.11.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d237a496e0778d719efb05058c64d28b757c77824e04ffe8796c7436e26712b7"}, + {file = "ruff-0.11.13-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:26816a218ca6ef02142343fd24c70f7cd8c5aa6c203bca284407adf675984432"}, + {file = "ruff-0.11.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:51c3f95abd9331dc5b87c47ac7f376db5616041173826dfd556cfe3d4977f492"}, + {file = "ruff-0.11.13-py3-none-win32.whl", hash = "sha256:96c27935418e4e8e77a26bb05962817f28b8ef3843a6c6cc49d8783b5507f250"}, + {file = "ruff-0.11.13-py3-none-win_amd64.whl", hash = "sha256:29c3189895a8a6a657b7af4e97d330c8a3afd2c9c8f46c81e2fc5a31866517e3"}, + {file = "ruff-0.11.13-py3-none-win_arm64.whl", hash = "sha256:b4385285e9179d608ff1d2fb9922062663c658605819a6876d8beef0c30b7f3b"}, + {file = "ruff-0.11.13.tar.gz", hash = "sha256:26fa247dc68d1d4e72c179e08889a25ac0c7ba4d78aecfc835d49cbfd60bf514"}, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20250516" +description = "Typing stubs for PyYAML" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530"}, + {file = "types_pyyaml-6.0.12.20250516.tar.gz", hash = "sha256:9f21a70216fc0fa1b216a8176db5f9e0af6eb35d2f2932acb87689d03a5bf6ba"}, +] + +[[package]] +name = "types-requests" +version = "2.32.4.20250611" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_requests-2.32.4.20250611-py3-none-any.whl", hash = "sha256:ad2fe5d3b0cb3c2c902c8815a70e7fb2302c4b8c1f77bdcd738192cdb3878072"}, + {file = "types_requests-2.32.4.20250611.tar.gz", hash = "sha256:741c8777ed6425830bf51e54d6abe245f79b4dcb9019f1622b773463946bf826"}, +] + +[package.dependencies] +urllib3 = ">=2" + +[[package]] +name = "typing-extensions" +version = "4.14.0" +description = "Backported and Experimental Type Hints for Python 3.9+" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = 
"sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, + {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, + {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + +[[package]] +name = "urllib3" +version = "2.5.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[metadata] +lock-version = "2.1" +python-versions = "^3.12" +content-hash = "6a1495932ba2ca9f4bc5ed70701d1588a0b5a0a7f263d263a176785da5e0fd56" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..7ab0ca6 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,83 @@ +[project] +name = "avidtools" +version = "0.2.0" +description = "Developer tools for AVID" +authors = [ + {name = "Subho Majumdar", email = "subho@avidml.org"}, + {name = "Harsh Raj", email = "harsh@avidml.org"}, + {name = "Carol Anderson", email = 
"carol@avidml.org"}, + {name = "Nathan Butters", email = "nathan@avidml.org"} +] +readme = "README.md" +license = {text = "Apache-2.0"} +keywords = ["ai", "vulnerability", "security", "machine-learning", "avid"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3.12", + "Topic :: Security", + "Topic :: Software Development :: Libraries :: Python Modules", +] +requires-python = ">=3.12" + +[project.urls] +Homepage = "https://github.com/avidml/avidtools" +Repository = "https://github.com/avidml/avidtools" +Documentation = "https://github.com/avidml/avidtools" +"Bug Tracker" = "https://github.com/avidml/avidtools/issues" + +[tool.poetry.dependencies] +python = "^3.12" +pydantic = "^2.11.5" +typing-extensions = "^4.13.2" +nvdlib = "^0.8.1" + +[tool.poetry.group.dev.dependencies] +mypy = "^1.16.0" +ruff = "^0.11.12" +pytest = "^8.0.0" +pytest-cov = "^4.0.0" +pytest-mock = "^3.12.0" +responses = "^0.24.0" +types-requests = "^2.32.4.20250611" +types-pyyaml = "^6.0.12.20250516" + +[build-system] +requires = ["poetry-core>=2.0.0,<3.0.0"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +testpaths = ["tests"] +python_files = ["test_*.py", "*_test.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +addopts = [ + "--strict-markers", + "--strict-config", + "--cov=avidtools", + "--cov-report=term-missing", + "--cov-report=html", + "--cov-report=xml", +] +markers = [ + "integration: marks tests as integration tests", + "unit: marks tests as unit tests", +] + +[tool.coverage.run] +source = ["avidtools"] +omit = [ + "tests/*", + "*/test_*", +] + +[tool.coverage.report] +exclude_lines = [ + "pragma: no cover", + "def __repr__", + "raise AssertionError", + "raise NotImplementedError", + "if __name__ == .__main__.:", +] + diff --git a/setup.py b/setup.py index 981c523..16f1915 100644 --- a/setup.py +++ b/setup.py @@ 
-2,16 +2,15 @@ setup( name='avidtools', - version='0.1.2', + version='0.2.0', description='Developer tools for AVID', author='Subho Majumdar', author_email='info@avidml.org', packages=find_packages(exclude=['.']), + python_requires='>=3.12', install_requires=[ - 'pydantic', - 'typing;python_version<"3.5"', - 'typing_extensions', - 'datetime', - 'nvdlib' + 'pydantic>=2.11.5', + 'typing-extensions>=4.13.2', + 'nvdlib>=0.8.1' ], ) diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 0000000..99e9665 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,161 @@ +# Testing + +This document describes how to run and contribute to the test suite for avidtools. + +## Test Structure + +The test suite is organized as follows: + +``` +tests/ +├── conftest.py # Test configuration and fixtures +├── unit/ # Unit tests +│ ├── datamodels/ # Tests for data models +│ │ ├── test_components.py # Component data model tests +│ │ └── test_enums.py # Enum tests +│ └── connectors/ # Tests for connectors +│ ├── test_atlas.py # ATLAS connector tests +│ ├── test_cve.py # CVE connector tests +│ └── test_inspect.py # Inspect connector tests +└── integration/ # Integration tests (future) +``` + +## Running Tests + +### All Tests +```bash +make test +# or +poetry run pytest tests/ +``` + +### Specific Test Categories +```bash +# Unit tests only +poetry run pytest tests/unit/ + +# Data model tests +poetry run pytest tests/unit/datamodels/ + +# Connector tests +poetry run pytest tests/unit/connectors/ + +# Specific connector +poetry run pytest tests/unit/connectors/test_atlas.py +``` + +### With Coverage +```bash +poetry run pytest tests/ --cov=avidtools --cov-report=html +``` + +### Verbose Output +```bash +poetry run pytest tests/ -v +``` + +## Test Configuration + +Tests are configured in `pyproject.toml` with the following settings: + +- **Test paths**: `tests/` directory +- **Coverage**: Enabled with HTML, XML, and terminal reports +- **Markers**: `unit` and `integration` for 
categorizing tests +- **Coverage target**: 80%+ code coverage + +## Writing Tests + +### Test Fixtures + +Common test fixtures are defined in `tests/conftest.py`: + +- `sample_atlas_case_study`: Mock ATLAS case study data +- `sample_cve_data`: Mock CVE data +- `mock_requests_get`: Mock HTTP requests +- `mock_nvdlib_search`: Mock NVD library calls + +### Test Categories + +Use pytest markers to categorize tests: + +```python +import pytest + +@pytest.mark.unit +def test_component_creation(): + """Unit test for component creation.""" + pass + +@pytest.mark.integration +def test_full_workflow(): + """Integration test for full workflow.""" + pass +``` + +### Mocking External Dependencies + +Tests use `pytest-mock` and `responses` for mocking: + +```python +from unittest.mock import Mock, patch +import responses + +# Mock HTTP calls +@responses.activate +def test_api_call(): + responses.add(responses.GET, "https://api.example.com", json={"data": "test"}) + # Test code here + +# Mock function calls +@patch('avidtools.module.function') +def test_function_call(mock_function): + mock_function.return_value = "test" + # Test code here +``` + +## CI/CD Integration + +Tests are automatically run in GitHub Actions on: + +- Pull requests to `main` branch +- The workflow includes: + 1. Python setup (3.12) + 2. Poetry installation and dependency setup + 3. Type checking with mypy + 4. Linting with ruff + 5. Test execution with coverage reporting + +## Coverage Reports + +After running tests with coverage, reports are generated in: + +- **HTML**: `htmlcov/index.html` - Interactive web report +- **XML**: `coverage.xml` - For CI/CD systems +- **Terminal**: Displayed in console output + +## Best Practices + +1. **Test Naming**: Use descriptive names that explain what is being tested +2. **Arrange-Act-Assert**: Structure tests with clear setup, execution, and verification +3. **Mocking**: Mock external dependencies to ensure tests are isolated and fast +4. 
**Coverage**: Aim for high test coverage but focus on meaningful tests +5. **Documentation**: Add docstrings to test classes and complex test methods + +## Debugging Tests + +### Run with Debug Info +```bash +poetry run pytest tests/ -v -s --tb=long +``` + +### Run Specific Test +```bash +poetry run pytest tests/unit/datamodels/test_components.py::TestLangValue::test_lang_value_creation -v +``` + +### Drop into Debugger +```python +def test_something(): + import pdb; pdb.set_trace() + # Test code here +``` diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..73d90cd --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +# Test package initialization diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..730da1a --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,58 @@ +""" +Test configuration and fixtures for avidtools. +""" + +import pytest +from typing import Dict, Any + + +@pytest.fixture +def sample_atlas_case_study() -> Dict[str, Any]: + """Sample ATLAS case study data for testing.""" + return { + "id": "AML.CS0001", + "name": "Sample ATLAS Case Study", + "target": "ML Model System", + "summary": "This is a sample case study for testing purposes.", + "incident-date": "2023-01-15", + "reporter": "Test Reporter", + "references": [ + { + "title": "Reference 1", + "url": "https://example.com/ref1" + }, + { + "title": "Reference 2", + "url": "https://example.com/ref2" + } + ] + } + + +@pytest.fixture +def sample_cve_data() -> Dict[str, Any]: + """Sample CVE data for testing.""" + return { + "id": "CVE-2023-12345", + "descriptions": [ + {"lang": "en", "value": "Test CVE description"} + ], + "published": "2023-01-15T10:00:00.000Z", + "lastModified": "2023-01-16T12:00:00.000Z", + "sourceIdentifier": "test@example.com", + "url": "https://nvd.nist.gov/vuln/detail/CVE-2023-12345", + "cpe": [], + "references": [] + } + + +@pytest.fixture +def mock_requests_get(mocker): + """Mock requests.get for testing external 
API calls.""" + return mocker.patch("requests.get") + + +@pytest.fixture +def mock_nvdlib_search(mocker): + """Mock nvdlib.searchCVE for testing CVE imports.""" + return mocker.patch("nvdlib.searchCVE") diff --git a/tests/integration/test_atlas_integration.py b/tests/integration/test_atlas_integration.py new file mode 100644 index 0000000..9b1645c --- /dev/null +++ b/tests/integration/test_atlas_integration.py @@ -0,0 +1,99 @@ +""" +Integration tests for avidtools. + +These tests verify that different components work together correctly. +""" + +import pytest +from unittest.mock import Mock, patch +import yaml + +from avidtools.connectors.atlas import import_case_study, convert_case_study +from avidtools.datamodels.report import Report + + +class TestAtlasIntegration: + """Integration tests for ATLAS workflow.""" + + @pytest.mark.integration + @patch('avidtools.connectors.atlas.requests.get') + def test_full_atlas_workflow(self, mock_get): + """Test complete ATLAS case study import and conversion workflow.""" + # Mock HTTP response with realistic ATLAS data + case_study_data = { + "id": "AML.CS0001", + "name": "VirusTotal Poisoning", + "target": "VirusTotal", + "summary": ( + "Attackers submit clean files followed by malicious files " + "with the same hash." 
+ ), + "incident-date": "2020-01-01", + "reporter": "MITRE", + "references": [ + { + "title": "VirusTotal Documentation", + "url": "https://developers.virustotal.com/" + } + ] + } + + # Mock the HTTP response + mock_response = Mock() + mock_response.content = yaml.dump(case_study_data).encode() + mock_get.return_value = mock_response + + # Test the full workflow + case_study_id = "AML.CS0001" + + # Step 1: Import case study + imported_data = import_case_study(case_study_id) + assert imported_data["id"] == case_study_id + assert imported_data["name"] == "VirusTotal Poisoning" + + # Step 2: Convert to AVID report + report = convert_case_study(imported_data) + assert isinstance(report, Report) + assert report.data_type == "AVID" + + # Step 3: Verify all data is properly converted + assert report.affects.deployer == ["VirusTotal"] + assert report.problemtype.description.value == "VirusTotal Poisoning" + assert report.description.value.startswith("Attackers submit clean files") + assert len(report.references) == 2 # Main + 1 additional + assert report.credit[0].value == "MITRE" + assert report.reported_date == "2020-01-01" + + # Verify HTTP call was made correctly + expected_url = ( + "https://raw.githubusercontent.com/mitre-atlas/atlas-data/main/" + f"data/case-studies/{case_study_id}.yaml" + ) + mock_get.assert_called_once_with(expected_url) + + @pytest.mark.integration + def test_report_serialization(self): + """Test that converted reports can be serialized properly.""" + # Create a minimal case study + case_study = { + "id": "AML.CS0002", + "name": "Test Case", + "target": "Test Target", + "summary": "Test summary", + "incident-date": "2023-01-01", + "references": [] + } + + # Convert to report + report = convert_case_study(case_study) + + # Test JSON serialization + json_data = report.model_dump_json() + assert isinstance(json_data, str) + assert "AVID" in json_data + assert "Test Case" in json_data + + # Test that serialized data can be loaded back + report_dict = 
report.model_dump() + assert report_dict["data_type"] == "AVID" + assert report_dict["problemtype"]["description"]["value"] == "Test Case" diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000..4a5d263 --- /dev/null +++ b/tests/unit/__init__.py @@ -0,0 +1 @@ +# Unit tests package diff --git a/tests/unit/connectors/__init__.py b/tests/unit/connectors/__init__.py new file mode 100644 index 0000000..acc0ac3 --- /dev/null +++ b/tests/unit/connectors/__init__.py @@ -0,0 +1 @@ +# Connectors unit tests diff --git a/tests/unit/connectors/test_atlas.py b/tests/unit/connectors/test_atlas.py new file mode 100644 index 0000000..83602b2 --- /dev/null +++ b/tests/unit/connectors/test_atlas.py @@ -0,0 +1,162 @@ +""" +Unit tests for ATLAS connector. +""" + +import yaml +import responses + +from avidtools.connectors.atlas import import_case_study, convert_case_study +from avidtools.datamodels.report import Report +from avidtools.datamodels.enums import ArtifactTypeEnum, ClassEnum, TypeEnum + + +class TestAtlasConnector: + """Test cases for ATLAS connector functions.""" + + @responses.activate + def test_import_case_study_success(self): + """Test successful import of ATLAS case study.""" + case_study_id = "AML.CS0001" + mock_data = { + "id": case_study_id, + "name": "Test Case Study", + "target": "ML System", + "summary": "Test summary" + } + + # Mock the HTTP response + url = ( + "https://raw.githubusercontent.com/mitre-atlas/atlas-data/main/" + f"data/case-studies/{case_study_id}.yaml" + ) + responses.add( + responses.GET, + url, + body=yaml.dump(mock_data), + status=200 + ) + + result = import_case_study(case_study_id) + assert result["id"] == case_study_id + assert result["name"] == "Test Case Study" + + @responses.activate + def test_import_case_study_http_error(self): + """Test import case study with HTTP error.""" + case_study_id = "AML.CS9999" + + # Mock HTTP error response + url = ( + 
"https://raw.githubusercontent.com/mitre-atlas/atlas-data/main/" + f"data/case-studies/{case_study_id}.yaml" + ) + responses.add( + responses.GET, + url, + status=404 + ) + + # The function doesn't currently raise an exception for HTTP errors + # It returns whatever yaml.safe_load returns for the error content + result = import_case_study(case_study_id) + # With a 404, the content will be empty and yaml.safe_load returns None + assert result is None + + def test_convert_case_study_basic(self, sample_atlas_case_study): + """Test basic conversion of ATLAS case study to AVID report.""" + report = convert_case_study(sample_atlas_case_study) + + assert isinstance(report, Report) + assert report.data_type == "AVID" + + # Check affects + assert report.affects is not None + assert report.affects.developer == [] + assert report.affects.deployer == [sample_atlas_case_study["target"]] + assert len(report.affects.artifacts) == 1 + assert report.affects.artifacts[0].type == ArtifactTypeEnum.system + assert ( + report.affects.artifacts[0].name == + sample_atlas_case_study["target"] + ) + + # Check problemtype + assert report.problemtype is not None + assert report.problemtype.classof == ClassEnum.atlas + assert report.problemtype.type == TypeEnum.advisory + assert report.problemtype.description.lang == "eng" + assert ( + report.problemtype.description.value == + sample_atlas_case_study["name"] + ) + + def test_convert_case_study_references(self, sample_atlas_case_study): + """Test conversion of references in ATLAS case study.""" + report = convert_case_study(sample_atlas_case_study) + + assert report.references is not None + assert len(report.references) == 3 # 1 source + 2 from references + + # Check main reference + main_ref = report.references[0] + assert main_ref.type == "source" + assert main_ref.label == sample_atlas_case_study["name"] + expected_url = ( + f"https://atlas.mitre.org/studies/" + f"{sample_atlas_case_study['id']}" + ) + assert main_ref.url == expected_url + + 
# Check additional references + for i, ref in enumerate(report.references[1:], 1): + expected_ref = sample_atlas_case_study["references"][i-1] + assert ref.type == "source" + assert ref.label == expected_ref["title"] + assert ref.url == expected_ref["url"] + + def test_convert_case_study_description(self, sample_atlas_case_study): + """Test conversion of description in ATLAS case study.""" + report = convert_case_study(sample_atlas_case_study) + + assert report.description is not None + assert report.description.lang == "eng" + assert report.description.value == sample_atlas_case_study["summary"] + + def test_convert_case_study_with_reporter(self, sample_atlas_case_study): + """Test conversion with reporter information.""" + report = convert_case_study(sample_atlas_case_study) + + assert report.credit is not None + assert len(report.credit) == 1 + assert report.credit[0].lang == "eng" + assert report.credit[0].value == sample_atlas_case_study["reporter"] + + def test_convert_case_study_without_reporter( + self, sample_atlas_case_study + ): + """Test conversion without reporter information.""" + case_study_no_reporter = sample_atlas_case_study.copy() + del case_study_no_reporter["reporter"] + + report = convert_case_study(case_study_no_reporter) + + # Credit should be None when no reporter is provided + assert report.credit is None + + def test_convert_case_study_reported_date(self, sample_atlas_case_study): + """Test conversion of reported date.""" + report = convert_case_study(sample_atlas_case_study) + + assert report.reported_date == sample_atlas_case_study["incident-date"] + + def test_convert_case_study_empty_references( + self, sample_atlas_case_study + ): + """Test conversion with empty references list.""" + case_study_empty_refs = sample_atlas_case_study.copy() + case_study_empty_refs["references"] = [] + + report = convert_case_study(case_study_empty_refs) + + assert report.references is not None + assert len(report.references) == 1 # Only the main 
"""
Unit tests for CVE connector.
"""

import pytest
from unittest.mock import Mock, patch
from datetime import date

from avidtools.connectors.cve import import_cve, convert_cve
from avidtools.datamodels.vulnerability import Vulnerability
from avidtools.datamodels.enums import ArtifactTypeEnum, ClassEnum, TypeEnum


class MockCVE:
    """Lightweight stand-in for an nvdlib CVE record, built from a dict."""

    def __init__(self, cve_data):
        # Scalar fields are copied through unchanged.
        self.id = cve_data["id"]
        self.published = cve_data["published"]
        self.lastModified = cve_data["lastModified"]
        self.sourceIdentifier = cve_data["sourceIdentifier"]
        self.url = cve_data["url"]
        # Nested records are wrapped in Mocks so attribute access mirrors
        # the shape of the real nvdlib objects.
        self.descriptions = [
            Mock(value=entry["value"]) for entry in cve_data["descriptions"]
        ]
        self.cpe = [Mock(criteria=item) for item in cve_data.get("cpe", [])]
        self.references = [
            Mock(url=link) for link in cve_data.get("references", [])
        ]


class TestCVEConnector:
    """Tests for the import_cve and convert_cve connector functions."""

    @staticmethod
    def _convert(cve_data):
        """Build a MockCVE from raw data and run it through convert_cve."""
        return convert_cve(MockCVE(cve_data))

    @patch('avidtools.connectors.cve.nvdlib.searchCVE')
    def test_import_cve_success(self, mock_search):
        """import_cve returns the first record matching the requested id."""
        cve_id = "CVE-2023-12345"
        fake_record = MockCVE({
            "id": cve_id,
            "descriptions": [{"value": "Test description"}],
            "published": "2023-01-15T10:00:00.000Z",
            "lastModified": "2023-01-16T12:00:00.000Z",
            "sourceIdentifier": "test@example.com",
            "url": "https://nvd.nist.gov/vuln/detail/CVE-2023-12345"
        })
        mock_search.return_value = [fake_record]

        imported = import_cve(cve_id)

        assert imported.id == cve_id
        mock_search.assert_called_once_with(cveId=cve_id)

    @patch('avidtools.connectors.cve.nvdlib.searchCVE')
    def test_import_cve_not_found(self, mock_search):
        """An empty NVD result list surfaces as an IndexError."""
        mock_search.return_value = []

        with pytest.raises(IndexError):
            import_cve("CVE-9999-99999")

    def test_convert_cve_basic(self, sample_cve_data):
        """Conversion yields an AVID Vulnerability with a CVE problemtype."""
        result = self._convert(sample_cve_data)

        assert isinstance(result, Vulnerability)
        assert result.data_type == "AVID"

        # Problemtype carries the CVE class/advisory type and description.
        assert result.problemtype is not None
        assert result.problemtype.classof == ClassEnum.cve
        assert result.problemtype.type == TypeEnum.advisory
        assert result.problemtype.description.lang == "eng"
        assert result.problemtype.description.value == "Test CVE description"

    def test_convert_cve_with_cpe(self, sample_cve_data):
        """CPE criteria are mapped to developers and system artifacts."""
        sample_cve_data["cpe"] = [
            "cpe:2.3:a:vendor:product:1.0:*:*:*:*:*:*:*",
            "cpe:2.3:a:other:software:2.0:*:*:*:*:*:*:*"
        ]

        result = self._convert(sample_cve_data)

        assert result.affects is not None
        assert len(result.affects.developer) == 2
        assert "vendor" in result.affects.developer
        assert "other" in result.affects.developer
        assert result.affects.deployer == []
        assert len(result.affects.artifacts) == 2
        assert result.affects.artifacts[0].type == ArtifactTypeEnum.system

    def test_convert_cve_references(self, sample_cve_data):
        """The NVD entry comes first, followed by the CVE's own references."""
        sample_cve_data["references"] = [
            "https://example.com/advisory",
            "https://github.com/vendor/repo/issues/123"
        ]

        result = self._convert(sample_cve_data)

        assert result.references is not None
        assert len(result.references) == 3  # 1 NVD + 2 additional

        # The leading reference always points back at the NVD entry.
        nvd_ref = result.references[0]
        assert nvd_ref.type == "source"
        assert nvd_ref.label == "NVD entry"
        assert nvd_ref.url == sample_cve_data["url"]

        # Remaining references mirror the CVE's reference URLs, in order.
        for extra_ref, expected_url in zip(
            result.references[1:], sample_cve_data["references"]
        ):
            assert extra_ref.type == "source"
            assert extra_ref.url == expected_url

    def test_convert_cve_description(self, sample_cve_data):
        """The description is the CVE id followed by ' Detail'."""
        result = self._convert(sample_cve_data)

        assert result.description is not None
        assert result.description.lang == "eng"
        assert result.description.value == f"{sample_cve_data['id']} Detail"

    def test_convert_cve_credit(self, sample_cve_data):
        """The NVD source identifier is credited."""
        result = self._convert(sample_cve_data)

        assert result.credit is not None
        assert len(result.credit) == 1
        assert result.credit[0].lang == "eng"
        assert result.credit[0].value == sample_cve_data["sourceIdentifier"]

    def test_convert_cve_dates(self, sample_cve_data):
        """Timestamps are truncated to calendar dates."""
        result = self._convert(sample_cve_data)

        assert result.published_date == date(2023, 1, 15)
        assert result.last_modified_date == date(2023, 1, 16)

    def test_convert_cve_empty_cpe(self, sample_cve_data):
        """No CPE data yields empty affects lists rather than None."""
        sample_cve_data["cpe"] = []

        result = self._convert(sample_cve_data)

        assert result.affects is not None
        assert result.affects.developer == []
        assert result.affects.deployer == []
        assert result.affects.artifacts == []

    def test_convert_cve_empty_references(self, sample_cve_data):
        """With no CVE references, only the NVD entry remains."""
        sample_cve_data["references"] = []

        result = self._convert(sample_cve_data)

        assert result.references is not None
        assert len(result.references) == 1  # Only NVD reference
        assert result.references[0].label == "NVD entry"
+""" + +import pytest +from unittest.mock import Mock, patch + +from avidtools.connectors.inspect import ( + import_eval_log, + convert_eval_log, + human_readable_name +) +from avidtools.datamodels.report import Report +from avidtools.datamodels.enums import ArtifactTypeEnum, ClassEnum, TypeEnum + + +class MockEvalLog: + """Mock EvalLog object for testing.""" + + def __init__(self): + self.eval = Mock() + self.eval.model = "openai/gpt-4" + self.eval.task = "benchmarks/truthfulqa" + self.eval.dataset = Mock() + self.eval.dataset.name = "truthfulqa" + self.eval.dataset.location = "https://example.com/dataset" + # Create a mock metric with proper name attribute + mock_metric = Mock() + mock_metric.name = "accuracy/score" + + # Create a mock scorer with proper name attribute + mock_scorer = Mock() + mock_scorer.name = "accuracy" + mock_scorer.metrics = [mock_metric] + + self.eval.scorers = [mock_scorer] + + self.samples = [ + Mock( + input="What is 2+2?", + output="4", + score=Mock(value=1.0) + ) + ] + + # Create mock results with scores + mock_score = Mock() + mock_score.name = "accuracy" + mock_score.metrics = {"accuracy": Mock(value=0.95)} + + self.results = Mock() + self.results.scores = [mock_score] + + +class TestInspectConnector: + """Test cases for Inspect connector functions.""" + + @patch('avidtools.connectors.inspect.read_eval_log') + def test_import_eval_log_success(self, mock_read_eval_log): + """Test successful import of evaluation log.""" + file_path = "/path/to/eval.json" + mock_eval_log = MockEvalLog() + mock_read_eval_log.return_value = mock_eval_log + + result = import_eval_log(file_path) + + assert result == mock_eval_log + mock_read_eval_log.assert_called_once_with(file_path) + + @patch('avidtools.connectors.inspect.read_eval_log') + def test_import_eval_log_missing_import(self, mock_read_eval_log): + """Test import when inspect_ai is not available.""" + mock_read_eval_log.side_effect = ImportError( + "inspect_ai package is required for this 
functionality" + ) + + with pytest.raises(ImportError): + import_eval_log("/path/to/eval.json") + + def test_human_readable_name_mapping(self): + """Test human readable name mappings.""" + assert human_readable_name["openai"] == "OpenAI" + assert human_readable_name["anthropic"] == "Anthropic" + assert human_readable_name["google"] == "Google" + assert human_readable_name["huggingface"] == "Hugging Face" + assert human_readable_name["meta-llama"] == "Meta" + + @patch('avidtools.connectors.inspect.import_eval_log') + def test_convert_eval_log_basic(self, mock_import): + """Test basic conversion of evaluation log to AVID reports.""" + mock_eval_log = MockEvalLog() + mock_import.return_value = mock_eval_log + + file_path = "/path/to/eval.json" + reports = convert_eval_log(file_path) + + assert len(reports) == 1 + assert isinstance(reports[0], Report) + + report = reports[0] + assert report.data_type == "AVID" + + @patch('avidtools.connectors.inspect.import_eval_log') + def test_convert_eval_log_affects(self, mock_import): + """Test conversion of affects information.""" + mock_eval_log = MockEvalLog() + mock_import.return_value = mock_eval_log + + reports = convert_eval_log("/path/to/eval.json") + report = reports[0] + + assert report.affects is not None + assert report.affects.developer == ["OpenAI"] + assert report.affects.deployer == ["openai/gpt-4"] + assert len(report.affects.artifacts) == 1 + assert report.affects.artifacts[0].type == ArtifactTypeEnum.model + assert report.affects.artifacts[0].name == "gpt-4" + + @patch('avidtools.connectors.inspect.import_eval_log') + def test_convert_eval_log_problemtype(self, mock_import): + """Test conversion of problemtype information.""" + mock_eval_log = MockEvalLog() + mock_import.return_value = mock_eval_log + + reports = convert_eval_log("/path/to/eval.json") + report = reports[0] + + assert report.problemtype is not None + assert report.problemtype.classof == ClassEnum.llm + assert report.problemtype.type == 
TypeEnum.measurement + assert report.problemtype.description.lang == "eng" + assert "gpt-4" in report.problemtype.description.value + assert "truthfulqa" in report.problemtype.description.value + + @patch('avidtools.connectors.inspect.import_eval_log') + def test_convert_eval_log_references(self, mock_import): + """Test conversion of references.""" + mock_eval_log = MockEvalLog() + mock_import.return_value = mock_eval_log + + reports = convert_eval_log("/path/to/eval.json") + report = reports[0] + + assert report.references is not None + assert len(report.references) == 1 + + ref = report.references[0] + assert ref.type == "source" + assert "truthfulqa" in ref.label + assert ref.url == "https://example.com/dataset" + + @patch('avidtools.connectors.inspect.import_eval_log') + def test_convert_eval_log_metrics(self, mock_import): + """Test conversion of metrics.""" + mock_eval_log = MockEvalLog() + mock_import.return_value = mock_eval_log + + reports = convert_eval_log("/path/to/eval.json") + report = reports[0] + + assert report.metrics is not None + assert len(report.metrics) == 1 + + metric = report.metrics[0] + assert metric.name == "accuracy" + assert metric.detection_method.name == "accuracy" + assert metric.results["value"] == 0.95 + assert metric.results["scorer"] == "accuracy" + + @patch('avidtools.connectors.inspect.import_eval_log') + def test_convert_eval_log_description(self, mock_import): + """Test conversion of description.""" + mock_eval_log = MockEvalLog() + mock_import.return_value = mock_eval_log + + reports = convert_eval_log("/path/to/eval.json") + report = reports[0] + + assert report.description is not None + assert report.description.lang == "eng" + assert "gpt-4" in report.description.value + assert "truthfulqa" in report.description.value + assert "What is 2+2?" 
in report.description.value + assert "4" in report.description.value + + @patch('avidtools.connectors.inspect.import_eval_log') + def test_convert_eval_log_multiple_samples(self, mock_import): + """Test conversion with multiple samples.""" + mock_eval_log = MockEvalLog() + mock_eval_log.samples = [ + Mock( + input="Question 1", + output="Answer 1", + score=Mock(value=0.8) + ), + Mock( + input="Question 2", + output="Answer 2", + score=Mock(value=0.9) + ) + ] + mock_import.return_value = mock_eval_log + + reports = convert_eval_log("/path/to/eval.json") + + assert len(reports) == 2 + assert all(isinstance(r, Report) for r in reports) + assert "Question 1" in reports[0].description.value + assert "Question 2" in reports[1].description.value + + @patch('avidtools.connectors.inspect.import_eval_log') + def test_convert_eval_log_different_model(self, mock_import): + """Test conversion with different model provider.""" + mock_eval_log = MockEvalLog() + mock_eval_log.eval.model = "anthropic/claude-3" + mock_import.return_value = mock_eval_log + + reports = convert_eval_log("/path/to/eval.json") + report = reports[0] + + assert report.affects.developer == ["Anthropic"] + assert report.affects.deployer == ["anthropic/claude-3"] + assert report.affects.artifacts[0].name == "claude-3" diff --git a/tests/unit/datamodels/__init__.py b/tests/unit/datamodels/__init__.py new file mode 100644 index 0000000..4613180 --- /dev/null +++ b/tests/unit/datamodels/__init__.py @@ -0,0 +1 @@ +# Data models unit tests diff --git a/tests/unit/datamodels/test_components.py b/tests/unit/datamodels/test_components.py new file mode 100644 index 0000000..b107f79 --- /dev/null +++ b/tests/unit/datamodels/test_components.py @@ -0,0 +1,206 @@ +""" +Unit tests for AVID data model components. 
+""" + +import pytest +from pydantic import ValidationError + +from avidtools.datamodels.components import ( + LangValue, + Artifact, + Detection, + Affects, + Problemtype, + Metric, + Reference, + AvidTaxonomy, + Impact, +) +from avidtools.datamodels.enums import ( + ArtifactTypeEnum, + ClassEnum, + LifecycleEnum, + MethodEnum, + SepEnum, + TypeEnum, +) + + +class TestLangValue: + """Test cases for LangValue data model.""" + + def test_lang_value_creation(self): + """Test creating a valid LangValue instance.""" + lang_value = LangValue(lang="en", value="Test value") + assert lang_value.lang == "en" + assert lang_value.value == "Test value" + + def test_lang_value_validation(self): + """Test LangValue validation.""" + with pytest.raises(ValidationError): + LangValue(lang="en") # Missing value + + with pytest.raises(ValidationError): + LangValue(value="Test value") # Missing lang + + +class TestArtifact: + """Test cases for Artifact data model.""" + + def test_artifact_creation(self): + """Test creating a valid Artifact instance.""" + artifact = Artifact(type=ArtifactTypeEnum.model, name="Test Model") + assert artifact.type == ArtifactTypeEnum.model + assert artifact.name == "Test Model" + + def test_artifact_validation(self): + """Test Artifact validation.""" + with pytest.raises(ValidationError): + Artifact(type="invalid_type", name="Test Model") + + with pytest.raises(ValidationError): + Artifact(type=ArtifactTypeEnum.model) # Missing name + + +class TestDetection: + """Test cases for Detection data model.""" + + def test_detection_creation(self): + """Test creating a valid Detection instance.""" + detection = Detection(type=MethodEnum.test, name="Statistical Test") + assert detection.type == MethodEnum.test + assert detection.name == "Statistical Test" + + +class TestAffects: + """Test cases for Affects data model.""" + + def test_affects_creation(self): + """Test creating a valid Affects instance.""" + artifact = Artifact(type=ArtifactTypeEnum.model, name="Test 
Model") + affects = Affects( + developer=["OpenAI"], + deployer=["Company A"], + artifacts=[artifact] + ) + assert affects.developer == ["OpenAI"] + assert affects.deployer == ["Company A"] + assert len(affects.artifacts) == 1 + assert affects.artifacts[0].name == "Test Model" + + def test_affects_empty_lists(self): + """Test Affects with empty lists.""" + affects = Affects(developer=[], deployer=[], artifacts=[]) + assert affects.developer == [] + assert affects.deployer == [] + assert affects.artifacts == [] + + +class TestProblemtype: + """Test cases for Problemtype data model.""" + + def test_problemtype_creation(self): + """Test creating a valid Problemtype instance.""" + description = LangValue(lang="en", value="Test problem description") + problemtype = Problemtype( + classof=ClassEnum.llm, + type=TypeEnum.measurement, + description=description + ) + assert problemtype.classof == ClassEnum.llm + assert problemtype.type == TypeEnum.measurement + assert problemtype.description.value == "Test problem description" + + def test_problemtype_optional_type(self): + """Test Problemtype with optional type field.""" + description = LangValue(lang="en", value="Test description") + problemtype = Problemtype( + classof=ClassEnum.aiid, + description=description + ) + assert problemtype.classof == ClassEnum.aiid + assert problemtype.type is None + + +class TestMetric: + """Test cases for Metric data model.""" + + def test_metric_creation(self): + """Test creating a valid Metric instance.""" + detection = Detection(type=MethodEnum.test, name="T-test") + metric = Metric( + name="Accuracy", + detection_method=detection, + results={"value": 0.95, "confidence": 0.05} + ) + assert metric.name == "Accuracy" + assert metric.detection_method.name == "T-test" + assert metric.results["value"] == 0.95 + + +class TestReference: + """Test cases for Reference data model.""" + + def test_reference_creation(self): + """Test creating a valid Reference instance.""" + reference = Reference( + 
type="source", + label="Test Reference", + url="https://example.com" + ) + assert reference.type == "source" + assert reference.label == "Test Reference" + assert reference.url == "https://example.com" + + def test_reference_optional_type(self): + """Test Reference with optional type field.""" + reference = Reference( + label="Test Reference", + url="https://example.com" + ) + assert reference.type is None + assert reference.label == "Test Reference" + + +class TestAvidTaxonomy: + """Test cases for AvidTaxonomy data model.""" + + def test_avid_taxonomy_creation(self): + """Test creating a valid AvidTaxonomy instance.""" + taxonomy = AvidTaxonomy( + vuln_id="AVID-2023-001", + risk_domain=["Security"], + sep_view=[SepEnum.S0100], + lifecycle_view=[LifecycleEnum.L04], + taxonomy_version="1.0" + ) + assert taxonomy.vuln_id == "AVID-2023-001" + assert taxonomy.risk_domain == ["Security"] + assert taxonomy.sep_view == [SepEnum.S0100] + assert taxonomy.lifecycle_view == [LifecycleEnum.L04] + + def test_avid_taxonomy_optional_vuln_id(self): + """Test AvidTaxonomy with optional vuln_id.""" + taxonomy = AvidTaxonomy( + risk_domain=["Performance"], + sep_view=[SepEnum.P0204], + lifecycle_view=[LifecycleEnum.L05], + taxonomy_version="1.0" + ) + assert taxonomy.vuln_id is None + + +class TestImpact: + """Test cases for Impact data model.""" + + def test_impact_creation(self): + """Test creating a valid Impact instance.""" + taxonomy = AvidTaxonomy( + risk_domain=["Security"], + sep_view=[SepEnum.S0100], + lifecycle_view=[LifecycleEnum.L04], + taxonomy_version="1.0" + ) + impact = Impact(avid=taxonomy) + assert impact.avid.risk_domain == ["Security"] + assert impact.avid.taxonomy_version == "1.0" diff --git a/tests/unit/datamodels/test_enums.py b/tests/unit/datamodels/test_enums.py new file mode 100644 index 0000000..c02a4a5 --- /dev/null +++ b/tests/unit/datamodels/test_enums.py @@ -0,0 +1,104 @@ +""" +Unit tests for AVID data model enums. 
+""" + +from avidtools.datamodels.enums import ( + ArtifactTypeEnum, + SepEnum, + LifecycleEnum, + ClassEnum, + TypeEnum, + MethodEnum, +) + + +class TestArtifactTypeEnum: + """Test cases for ArtifactTypeEnum.""" + + def test_artifact_type_values(self): + """Test that ArtifactTypeEnum has expected values.""" + assert ArtifactTypeEnum.dataset == "Dataset" + assert ArtifactTypeEnum.model == "Model" + assert ArtifactTypeEnum.system == "System" + + def test_artifact_type_membership(self): + """Test membership in ArtifactTypeEnum.""" + assert "Dataset" in ArtifactTypeEnum + assert "Model" in ArtifactTypeEnum + assert "System" in ArtifactTypeEnum + assert "Unknown" not in ArtifactTypeEnum + + +class TestSepEnum: + """Test cases for SepEnum.""" + + def test_sep_enum_security_categories(self): + """Test security-related SEP categories.""" + assert SepEnum.S0100 == "S0100: Software Vulnerability" + assert SepEnum.S0200 == "S0200: Supply Chain Compromise" + assert SepEnum.S0400 == "S0400: Model Bypass" + + def test_sep_enum_ethics_categories(self): + """Test ethics-related SEP categories.""" + assert SepEnum.E0100 == "E0100: Bias/ Discrimination" + assert SepEnum.E0200 == "E0200: Explainability" + assert SepEnum.E0400 == "E0400: Misinformation" + + def test_sep_enum_performance_categories(self): + """Test performance-related SEP categories.""" + assert SepEnum.P0100 == "P0100: Data issues" + assert SepEnum.P0200 == "P0200: Model issues" + assert SepEnum.P0400 == "P0400: Safety" + + +class TestLifecycleEnum: + """Test cases for LifecycleEnum.""" + + def test_lifecycle_enum_values(self): + """Test that LifecycleEnum has expected values.""" + assert LifecycleEnum.L01 == "L01: Business Understanding" + assert LifecycleEnum.L02 == "L02: Data Understanding" + assert LifecycleEnum.L03 == "L03: Data Preparation" + assert LifecycleEnum.L04 == "L04: Model Development" + assert LifecycleEnum.L05 == "L05: Evaluation" + assert LifecycleEnum.L06 == "L06: Deployment" + + def 
test_lifecycle_enum_ordering(self): + """Test that lifecycle enums maintain expected order.""" + lifecycle_stages = [ + LifecycleEnum.L01, LifecycleEnum.L02, LifecycleEnum.L03, + LifecycleEnum.L04, LifecycleEnum.L05, LifecycleEnum.L06 + ] + assert len(lifecycle_stages) == 6 + + +class TestClassEnum: + """Test cases for ClassEnum.""" + + def test_class_enum_values(self): + """Test that ClassEnum has expected values.""" + assert ClassEnum.aiid == "AIID Incident" + assert ClassEnum.atlas == "ATLAS Case Study" + assert ClassEnum.cve == "CVE Entry" + assert ClassEnum.llm == "LLM Evaluation" + assert ClassEnum.na == "Undefined" + + +class TestTypeEnum: + """Test cases for TypeEnum.""" + + def test_type_enum_values(self): + """Test that TypeEnum has expected values.""" + assert TypeEnum.issue == "Issue" + assert TypeEnum.advisory == "Advisory" + assert TypeEnum.measurement == "Measurement" + assert TypeEnum.detection == "Detection" + + +class TestMethodEnum: + """Test cases for MethodEnum.""" + + def test_method_enum_values(self): + """Test that MethodEnum has expected values.""" + assert MethodEnum.test == "Significance Test" + assert MethodEnum.thres == "Static Threshold"