diff --git a/.gitignore b/.gitignore index 5220459a..b79253cf 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,5 @@ tmp* enqueue_jobs.sh gen_*.slurm +test-output/ +test-output.json diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..32309738 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,91 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Overview + +ParEval-Repo is an LLM benchmarking suite for repository-scale translation of HPC (parallel) codes. It translates codebases between parallel programming models (e.g., CUDA → Kokkos, OpenMP-offload → CUDA) using LLMs, then builds and validates the translated code on HPC systems. + +## Setup + +```bash +# Preferred (uses exact pinned versions) +uv sync && . .venv/bin/activate + +# Alternative +pip install -r requirements.txt +``` + +Python 3.11.13+ required. + +## Key Commands + +### Translation (LLM inference) +```bash +python src/translate/translate.py --help + +# Example: naive translation +python src/translate/translate.py \ + --input targets/XSBench/openmp-offload \ + --output /path/to/output \ + --src-model openmp-offload \ + --dst-model cuda \ + --method naive \ + --config config/perlmutter-config.json +``` + +### Running drivers (build and test translated repos) +```bash +python src/drivers/run-all.py --help + +# Example +python src/drivers/run-all.py \ + --translations-root /path/to/translations \ + --output results.json \ + --config config/perlmutter-config.json +``` + +## Architecture + +### Two-Phase Pipeline + +1. **Translation phase** (`src/translate/`): Takes a source repo plus `target.json` files for both the source and destination models, then calls an LLM to produce translated code files. +2. **Driver phase** (`src/drivers/`): Builds and runs the translated repos, comparing outputs against expected values. + +### Translation Methods (`src/translate/`) + +Three strategies, selectable via `--method`: + +- **naive** (`naive/`): Translates file-by-file with full repo context in a single LLM prompt. Uses `ChunkFileAgent` for large files. +- **top-down-agentic** (`top_down_agentic/`): Multi-agent pipeline: `DependencyAgent` builds a file dependency tree → `ChunkAgent` splits large files → `ContextAgent` gathers relevant context → each file is translated. +- **swe-agent** (`swe_agent/`): Wraps the external SWE-agent tool for autonomous translation. + +All methods inherit from the `Translator` ABC (`translator.py`) and use `GeneratorMixin` (`generator_mixin.py`) for unified LLM access across backends (OpenAI, Gemini, HuggingFace, vLLM, local). + +### Target Configuration (`target.json`) + +Each `targets/<application>/<model>/` directory requires a `target.json` with: +- Build/run commands and timeouts +- Expected output strings for validation (`debug_outputs`, `debug_type`) +- File classifications (build files, main entry points) +- Dependency module names (resolved via the system config) + +The driver reads `target.json` to know how to build, run, and validate each translated repo. + +### System Configuration (`config/`) + +JSON files per HPC system (e.g., `perlmutter-config.json`) that map dependency names to module load commands and set the GPU architecture (`sm`). Passed to both translation and driver scripts via `--config`.
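+For illustration, a minimal sketch of how a driver might resolve a dependency name through such a config (the `modules` key is an assumed name for this sketch, not the actual schema; see `ConfigManager` in `src/drivers/util.py` for the real logic):
+
+```python
+import json
+
+# Load the per-system config passed via --config (schema sketch only).
+with open("config/perlmutter-config.json", encoding="utf-8") as f:
+    config = json.load(f)
+
+# Map a dependency name from target.json to its module-load command,
+# and read the GPU architecture used when compiling.
+load_cmd = config.get("modules", {}).get("cuda")  # hypothetical key/entry
+sm_arch = config.get("sm")
+print(load_cmd, sm_arch)
+```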
+ +### Driver Utilities (`src/drivers/util.py`) + +Core utility classes used throughout drivers: +- `CommandExecutor` — runs shell commands with timeout and dry-run support +- `ConfigManager` — loads and resolves system config +- `DataManager` — persists results to JSON +- `ResultBuilder` — constructs structured build/run result objects + +## Adding a New Target + +1. Create `targets/<application>/<model>/repo/` with source code 2. Create `targets/<application>/<model>/target.json` following the schema in existing targets 3. Provide a `target.json` for both source and destination models when translating diff --git a/config/nano_v3_reasoning_parser.py b/config/nano_v3_reasoning_parser.py new file mode 100644 index 00000000..d3ea6626 --- /dev/null +++ b/config/nano_v3_reasoning_parser.py @@ -0,0 +1,19 @@ +from vllm.reasoning.abs_reasoning_parsers import ReasoningParserManager +from vllm.reasoning.deepseek_r1_reasoning_parser import DeepSeekR1ReasoningParser + + +@ReasoningParserManager.register_module("nano_v3") +class NanoV3ReasoningParser(DeepSeekR1ReasoningParser): + def extract_reasoning(self, model_output, request): + reasoning_content, final_content = super().extract_reasoning( + model_output, request + ) + if ( + hasattr(request, "chat_template_kwargs") + and request.chat_template_kwargs + and request.chat_template_kwargs.get("enable_thinking") is False + and final_content is None + ): + reasoning_content, final_content = final_content, reasoning_content + + return reasoning_content, final_content \ No newline at end of file diff --git a/config/perlmutter-vllm-glm.yaml b/config/perlmutter-vllm-glm.yaml new file mode 100644 index 00000000..5af2f165 --- /dev/null +++ b/config/perlmutter-vllm-glm.yaml @@ -0,0 +1,8 @@ +# vLLM config file for Perlmutter when using GLM-4.7-GGUF:Q4_K_M + +tensor-parallel-size: 4 +max-model-len: 131072 +max-num-seqs: 2 +enable-auto-tool-choice: true +tool-call-parser: glm45 +reasoning-parser: glm45 diff --git a/config/perlmutter-vllm-nemo.yaml b/config/perlmutter-vllm-nemo.yaml new file mode 100644 index 00000000..6f8dc1a4 --- /dev/null +++ b/config/perlmutter-vllm-nemo.yaml @@ -0,0 +1,10 @@ +# vLLM config file for Perlmutter when using nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16 + +tensor-parallel-size: 4 +max-model-len: 262144 +max-num-seqs: 8 +reasoning-parser-plugin: config/nano_v3_reasoning_parser.py +reasoning-parser: nano_v3 +enable-auto-tool-choice: true +tool-call-parser: qwen3_coder +trust-remote-code: true diff --git a/config/perlmutter-vllm-oss.yaml b/config/perlmutter-vllm-oss.yaml new file mode 100644 index 00000000..bd39f67c --- /dev/null +++ b/config/perlmutter-vllm-oss.yaml @@ -0,0 +1,12 @@ +# vLLM config file for Perlmutter when using openai/gpt-oss-120b + +tensor-parallel-size: 4 +async-scheduling: true +no-enable-prefix-caching: true +max-model-len: 131072 +gpu-memory-utilization: 0.95 +max-num-seqs: 4 +max-num-batched-tokens: 2048 +tool-call-parser: openai +reasoning-parser: openai_gptoss +enable-auto-tool-choice: true diff --git a/config/perlmutter-vllm-qwen.yaml b/config/perlmutter-vllm-qwen.yaml new file mode 100644 index 00000000..a4077901 --- /dev/null +++ b/config/perlmutter-vllm-qwen.yaml @@ -0,0 +1,7 @@ +# vLLM config file for Perlmutter when using Qwen/Qwen3-Coder-Next + +tensor-parallel-size: 4 +max-model-len: 262144 +max-num-seqs: 2 +enable-auto-tool-choice: true +tool-call-parser: qwen3_coder diff --git a/pyproject.toml b/pyproject.toml index 602116c4..fabd0ca8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,4 +11,5 @@ dependencies = [
"langchain-text-splitters>=0.3.11", "openai>=1.107.3", "pandas>=2.3.2", + "tiktoken>=0.9.0", ] diff --git a/src/translate/generator_mixin.py b/src/translate/generator_mixin.py index 7527d640..16ec4ce8 100644 --- a/src/translate/generator_mixin.py +++ b/src/translate/generator_mixin.py @@ -3,14 +3,20 @@ author: Daniel Nichols date: November 2024 """ +import logging import os +import sys import time import pickle import atexit +import subprocess +from pathlib import Path from typing import Optional, Literal, Callable, List, Tuple, Dict, Union, Any from math import ceil from dataclasses import dataclass +logger = logging.getLogger("pareval-repo") + @dataclass class GenericResponse: """ Class to hold a generic response from any generator. @@ -30,11 +36,11 @@ class GenericResponse: VLLM_BASE_URL = "http://127.0.0.1:8000/v1" VLLM_API_KEY = "token_abc123" VLLM_TIMEOUT = 1200 -REASONING_TEMP = 0.6 -REASONING_TOP_P = 0.95 -REASONING_MAX_TOKENS = 8192 -RETRY_DELAY = 15 +BASE_COOLDOWN = 10 CACHE_RETENTION_TIME = 60 +VLLM_SERVE_CHECK_COOLDOWN = 10 +VLLM_MAX_SERVE_CHECK_ATTEMPTS = 200 +VLLM_MAX_TIMEOUT_RETRIES = 3 @dataclass @@ -50,6 +56,10 @@ class GeneratorMixin: """ Mixin class for providing a unified generative interface within other classes. """ + # Token limit constants (match keet's values) + _REASONING_OUTPUT_RESERVE: int = 32000 # tokens reserved for reasoning and output + _MAX_TOKEN_COUNT: int = 131072 # 128k tokens, limit for most models + _backend: Literal["openai", "gemini", "hf", "vllm", "local"] _llm_name: str _generator: Optional[Callable] = None @@ -70,6 +80,12 @@ class GeneratorMixin: _hf_inference_client: Optional["InferenceClient"] = None # type: ignore # noqa: F821 _vllm_client: Optional['OpenAI'] = None # type: ignore # noqa: F821 _async_mode: bool = False + _vllm_environment: Optional[str] = None + _vllm_yaml_config: Optional[str] = None + _vllm_keepalive_id: Optional[int] = None + _api_key: Optional[str] = None + _api_base_url: Optional[str] = None + _encoder: Optional[Any] = None # Backend configurations _BACKEND_CONFIGS = { @@ -105,6 +121,11 @@ def __init__( system_prompt: Optional[str] = None, disable_request_cache: Optional[bool] = False, async_mode: bool = False, + vllm_environment: Optional[str] = None, + vllm_yaml_config: Optional[str] = None, + vllm_keepalive_id: Optional[int] = None, + api_key: Optional[str] = None, + api_base_url: Optional[str] = None, ): self._backend = backend self._llm_name = llm_name @@ -114,10 +135,17 @@ def __init__( self._system_prompt = system_prompt self._disable_request_cache = disable_request_cache self._async_mode = async_mode + self._vllm_environment = vllm_environment + self._vllm_yaml_config = vllm_yaml_config + self._vllm_keepalive_id = vllm_keepalive_id + self._api_key = api_key + self._api_base_url = api_base_url + self._encoder = None self._validate_backend() self._configure_backend(rpm_limit, tpm_limit) self._setup_rate_limiting() + self._setup_encoder() def _validate_backend(self) -> None: @@ -134,8 +162,8 @@ def _configure_backend(self, rpm_limit: Optional[int], tpm_limit: Optional[int]) self._rpm_limit = rpm_limit or config.rpm_limit self._tpm_limit = tpm_limit or config.tpm_limit - # Check API key requirements - if config.requires_api_key and config.api_key_env_var: + # Check API key requirements (skip if a generic api_key was provided) + if config.requires_api_key and config.api_key_env_var and self._api_key is None: if not os.environ.get(config.api_key_env_var): raise ValueError(f"{config.api_key_env_var} environment variable 
not set.") @@ -152,10 +180,28 @@ def _configure_backend(self, rpm_limit: Optional[int], tpm_limit: Optional[int]) raise NotImplementedError(f"backend '{self._backend}' not implemented.") + def _setup_encoder(self) -> None: + """Set up a tiktoken encoder for token counting if the backend supports it.""" + if self._backend not in ("openai", "vllm"): + return + try: + import tiktoken + query_str = self._llm_name + if "/" in query_str: + query_str = query_str.split("/")[-1] + self._encoder = tiktoken.encoding_for_model(query_str) + except Exception: + # Model not in tiktoken's registry; get_token_count will use a default encoding + self._encoder = None + + def _setup_openai(self) -> None: """Setup OpenAI client and generator.""" from openai import OpenAI - self._openai_client = OpenAI() + self._openai_client = OpenAI( + api_key=self._api_key, # None → SDK reads OPENAI_API_KEY env var + base_url=self._api_base_url, # None → SDK uses default + ) self._generator = self._generate_openai @@ -165,18 +211,116 @@ def _setup_vllm(self) -> None: from openai import AsyncOpenAI as OpenAI else: from openai import OpenAI + + api_key = self._api_key or os.environ.get("VLLM_API_KEY", VLLM_API_KEY) + base_url = self._api_base_url or VLLM_BASE_URL + self._vllm_client = OpenAI( - base_url=VLLM_BASE_URL, - api_key=VLLM_API_KEY, + base_url=base_url, + api_key=api_key, timeout=VLLM_TIMEOUT ) self._generator = self._generate_vllm + if self._vllm_environment: + self._launch_vllm_server( + Path(self._vllm_environment), + self._vllm_yaml_config, + self._vllm_keepalive_id, + ) + elif not self._test_vllm_server(): + self._wait_for_vllm_server() + + + def _test_vllm_server(self) -> bool: + """Test if the vLLM server is running.""" + base = (self._api_base_url or VLLM_BASE_URL).rstrip("/") + if base.endswith("/v1"): + base = base[:-3] + health_url = base + "/health" + return ( + subprocess.run( + ["curl", health_url], + capture_output=True, + text=True, + check=False, + ).returncode == 0 + ) + + + def _wait_for_vllm_server(self) -> None: + """Poll until the vLLM server is ready or max attempts are exceeded.""" + num_attempts = 0 + while num_attempts < VLLM_MAX_SERVE_CHECK_ATTEMPTS: + if self._test_vllm_server(): + return + logger.info("vLLM server not ready, checking again after %d seconds...", VLLM_SERVE_CHECK_COOLDOWN) + time.sleep(VLLM_SERVE_CHECK_COOLDOWN) + num_attempts += 1 + raise RuntimeError("vLLM server not ready after max attempts.") + + + def _launch_vllm_server( + self, + environment_path: Path, + yaml_config: Optional[str] = None, + keepalive_id: Optional[int] = None, + ) -> Optional[subprocess.Popen]: + """Launch a vLLM server in the background using the given Python environment. + + Args: + environment_path: Path to the Python virtual environment directory. + yaml_config: Optional path to a vLLM YAML configuration file. + keepalive_id: If set, write the server PID to a file instead of registering + an atexit handler so the process can outlive this Python session. + + Returns: + The Popen object for the vLLM server, or None if the server was already running. 
+ """ + if self._test_vllm_server(): + return None + + py_executable = environment_path / "bin" / "python" + vllm_executable = environment_path / "bin" / "vllm" + + api_key = self._api_key or os.environ.get("VLLM_API_KEY", VLLM_API_KEY) + vllm_command = [ + str(py_executable), + str(vllm_executable), + "serve", + "--model", self._llm_name, + "--host", "127.0.0.1", + "--port", "8000", + "--api-key", api_key, + ] + if self._is_reasoning_model(): + vllm_command.extend(["--reasoning-parser", "deepseek_r1"]) + if yaml_config: + vllm_command.extend(["--config", yaml_config]) + + logger.info("Launching vLLM server: %s", " ".join(vllm_command)) + vllm_server = subprocess.Popen( + vllm_command, + start_new_session=(keepalive_id is not None), + ) + + self._wait_for_vllm_server() + + if keepalive_id is not None: + pid_file = Path(sys.argv[0]).resolve().parent / f"vllm-server-{keepalive_id}.pid" + pid_file.write_text(str(vllm_server.pid), encoding="utf-8") + else: + atexit.register(vllm_server.terminate) + + logger.info("vLLM server ready.") + return vllm_server + def _setup_gemini(self) -> None: """Setup Gemini client and generator.""" import google.generativeai as genai - genai.configure(api_key=os.environ["GEMINI_API_KEY"]) + api_key = self._api_key or os.environ["GEMINI_API_KEY"] + genai.configure(api_key=api_key) self._gemini_client = genai.GenerativeModel( self._llm_name, system_instruction=self._system_prompt @@ -187,7 +331,8 @@ def _setup_gemini(self) -> None: def _setup_huggingface(self) -> None: """Setup Hugging Face client and generator.""" from huggingface_hub import InferenceClient - self._hf_inference_client = InferenceClient(api_key=os.environ["HF_API_KEY"]) + api_key = self._api_key or os.environ["HF_API_KEY"] + self._hf_inference_client = InferenceClient(api_key=api_key) self._generator = self._generate_hf @@ -208,7 +353,7 @@ def _load_request_cache(self) -> None: try: with open(f".request_cache_{self._backend}.pkl", "rb") as f: self._recent_requests = pickle.load(f) - print(f"Loaded {len(self._recent_requests)} recent requests from cache.") + logger.debug("Loaded %d recent requests from cache.", len(self._recent_requests)) except FileNotFoundError: pass @@ -234,104 +379,63 @@ def _format_messages_list(self, prompt: str, return messages - def _generate_openai( - self, - prompt: str, - system_prompt: str, - max_new_tokens: int = 2048, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - n: int = 1, - **kwargs - ) -> List[GenericResponse]: - """ Generate text using the OpenAI API. 
- """ - if not self._openai_client: - raise ValueError("OpenAI client not initialized.") - - completion = self._openai_client.chat.completions.create( - model=self._llm_name, - messages=self._format_messages_list(prompt, system_prompt), - max_tokens=max_new_tokens, - temperature=temperature, - top_p=top_p, - n=1, - **kwargs - ) - - if not (completion and completion.choices): - raise ValueError("No completions returned from OpenAI.") - - return [GenericResponse(c.message.content, - completion.usage.prompt_tokens // n, - completion.usage.completion_tokens // n - ) for c in completion.choices] - - def _is_reasoning_model(self) -> bool: """Check if the current model is a reasoning model.""" - return "QwQ" in self._llm_name or "qwq" in self._llm_name + name_lower = self._llm_name.lower() + return any(m in name_lower for m in ["qwq", "deepseek_r1", "deepseek-r1"]) - def _adjust_parameters_for_reasoning(self, temperature: Optional[float], - top_p: Optional[float], - max_new_tokens: int) -> Tuple[float, float, int]: - """Adjust parameters for reasoning models.""" - if self._is_reasoning_model(): - return REASONING_TEMP, REASONING_TOP_P, REASONING_MAX_TOKENS - return temperature, top_p, max_new_tokens + def _generate_openai(self, prompt: str, system_prompt: str, **kwargs) -> List[GenericResponse]: + """ Generate text using the OpenAI responses API. + """ + if not self._openai_client: + raise ValueError("OpenAI client not initialized.") + messages = self._format_messages_list(prompt, system_prompt) + gen_kwargs: Dict[str, Any] = {"model": self._llm_name, "input": messages, **kwargs} + if self._llm_name.endswith("-thinking"): + gen_kwargs["model"] = self._llm_name.replace("-thinking", "") + gen_kwargs["reasoning"] = {"effort": "high"} - def _prepare_messages_for_reasoning(self, prompt: str, system_prompt: Optional[str]) -> List[Dict[str, str]]: - """Prepare messages for reasoning models by merging system prompt.""" - if self._is_reasoning_model() and system_prompt is not None: - return self._format_messages_list(system_prompt + "\n" + prompt, None) - return self._format_messages_list(prompt, system_prompt) + response = self._openai_client.responses.create(**gen_kwargs) + return [GenericResponse( + response.output_text, + response.usage.input_tokens, + response.usage.output_tokens, + )] - def _generate_vllm( - self, - prompt: str, - system_prompt: str, - max_new_tokens: int = 2048, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - n: int = 1, - **kwargs - ) -> List[GenericResponse]: - """ Generate text using the vLLM via the OpenAI server API. + + def _generate_vllm(self, prompt: str, system_prompt: str, **kwargs) -> List[GenericResponse]: + """ Generate text using the vLLM via the OpenAI responses API. 
""" + from openai import APITimeoutError if not self._vllm_client: raise ValueError("vLLM (OpenAI) client not initialized.") - # Adjust parameters for reasoning models - temperature, top_p, max_new_tokens = self._adjust_parameters_for_reasoning( - temperature, top_p, max_new_tokens - ) + messages = self._format_messages_list(prompt, system_prompt) + gen_kwargs: Dict[str, Any] = {"model": self._llm_name, "input": messages, **kwargs} + if "gpt-oss" in self._llm_name: + gen_kwargs["reasoning"] = {"effort": "high"} - # Prepare messages - messages = self._prepare_messages_for_reasoning(prompt, system_prompt) - is_reasoning = self._is_reasoning_model() - - completion = self._vllm_client.chat.completions.create( - model=self._llm_name, - messages=messages, - temperature=temperature, - top_p=top_p, - max_tokens=max_new_tokens, - n=n, - **kwargs - ) - - if not (completion and completion.choices): - raise ValueError("No completions returned from vLLM.") + for _ in range(VLLM_MAX_TIMEOUT_RETRIES): + try: + response = self._vllm_client.responses.create(**gen_kwargs) + break + except (TimeoutError, APITimeoutError) as e: + logger.warning("vLLM request timeout (%s), retrying...", e) + time.sleep(BASE_COOLDOWN) + continue + else: + raise RuntimeError( + f"vLLM request failed after {VLLM_MAX_TIMEOUT_RETRIES} timeout retries." + ) return [GenericResponse( - c.message.content if c.message.content else "", - completion.usage.prompt_tokens // n if completion.usage else 0, - completion.usage.completion_tokens // n if completion.usage else 0, - c.message.reasoning_content if is_reasoning else None - ) for c in completion.choices] + response.output_text, + response.usage.input_tokens if response.usage else 0, + response.usage.output_tokens if response.usage else 0, + )] def _parse_reasoning_response(self, response: str) -> Tuple[str, Optional[str]]: @@ -346,32 +450,22 @@ async def _generate_vllm_async( self, prompts: List[str], system_prompt: str, - max_new_tokens: int = 2048, - temperature: Optional[float] = None, - top_p: Optional[float] = None, **kwargs ) -> List[GenericResponse]: - """ Generate text using the vLLM via the OpenAI server API in async mode. + """ Generate text using the vLLM via the OpenAI completions API in async batch mode. 
""" if not self._vllm_client: raise ValueError("vLLM (OpenAI) client not initialized.") - # Adjust parameters for reasoning models - temperature, top_p, max_new_tokens = self._adjust_parameters_for_reasoning( - temperature, top_p, max_new_tokens - ) is_reasoning = self._is_reasoning_model() - # Prepare prompts for reasoning models + # Merge system prompt into user prompts for reasoning models if is_reasoning and system_prompt is not None: prompts = [system_prompt + "\n" + p for p in prompts] completion = await self._vllm_client.completions.create( model=self._llm_name, prompt=prompts, - temperature=temperature, - top_p=top_p, - max_tokens=max_new_tokens, **kwargs ) @@ -381,11 +475,8 @@ async def _generate_vllm_async( results = [] for c in completion.choices: response, reasoning = c.text, None - print(f"Raw response: {response}") - if is_reasoning: response, reasoning = self._parse_reasoning_response(response) - results.append(GenericResponse( response, completion.usage.prompt_tokens // len(prompts), @@ -396,16 +487,7 @@ async def _generate_vllm_async( return results - def _generate_gemini( - self, - prompt: str, - system_prompt: str, - max_new_tokens: int = 2048, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - n: int = 1, - **kwargs - ) -> List[GenericResponse]: + def _generate_gemini(self, prompt: str, system_prompt: str, **kwargs) -> List[GenericResponse]: """ Generate text using the Gemini API. """ import google.generativeai as genai @@ -420,28 +502,14 @@ def _generate_gemini( response = self._gemini_client.generate_content( prompt, - generation_config=genai.types.GenerationConfig( - candidate_count=1, - max_output_tokens=max_new_tokens, - temperature=temperature, - top_p=top_p, - ) + generation_config=genai.types.GenerationConfig(candidate_count=1), ) return [GenericResponse(response.text, response.usage_metadata.prompt_token_count, response.usage_metadata.candidates_token_count)] - def _generate_hf( - self, - prompt: str, - system_prompt: str, - max_new_tokens: int = 2048, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - n: int = 1, - **kwargs - ) -> List[GenericResponse]: + def _generate_hf(self, prompt: str, system_prompt: str, **kwargs) -> List[GenericResponse]: """ Generate text using the Hugging Face API. """ if not self._hf_inference_client: @@ -450,10 +518,6 @@ def _generate_hf( response = self._hf_inference_client.chat.completions.create( model=self._llm_name, messages=self._format_messages_list(prompt, system_prompt), - max_tokens=max_new_tokens, - temperature=temperature, - top_p=top_p, - n=1, **kwargs ) @@ -465,6 +529,31 @@ def _generate_hf( response.usage.completion_tokens)] + def get_token_count(self, prompt: str, default_encoding: str = "o200k_base") -> int: + """Return the number of tokens in the prompt according to tiktoken. + + Uses the model-specific encoding if available, falling back to default_encoding. + """ + import tiktoken + if self._encoder is None: + encoder = tiktoken.get_encoding(default_encoding) + return len(encoder.encode(prompt, disallowed_special=())) + return len(self._encoder.encode(prompt, disallowed_special=())) + + + def get_max_tokens(self) -> int: + """Return the effective maximum number of input tokens for the current model. + + Reserves _REASONING_OUTPUT_RESERVE tokens from the true maximum for reasoning + and output, matching keet's implementation. 
+ """ + if "gpt-5" in self._llm_name: + if "thinking" in self._llm_name: + return 196000 - self._REASONING_OUTPUT_RESERVE + return 272000 - self._REASONING_OUTPUT_RESERVE + return self._MAX_TOKEN_COUNT - self._REASONING_OUTPUT_RESERVE + + @property def input_token_count(self): """ Return the total number of tokens used as input to the generator. @@ -502,10 +591,10 @@ def get_stats(self) -> Tuple[int, int, int]: def print_stats(self): """ Print the current statistics for the generator. """ - print(f"Input token count: {self._input_token_count}") - print(f"Output token count: {self._output_token_count}") - print(f"Total token count: {self._input_token_count + self._output_token_count}") - print(f"Request count: {self._request_count}") + logger.info("Input token count: %d", self._input_token_count) + logger.info("Output token count: %d", self._output_token_count) + logger.info("Total token count: %d", self._input_token_count + self._output_token_count) + logger.info("Request count: %d", self._request_count) def _enforce_limits(self) -> None: @@ -521,7 +610,7 @@ def _enforce_rpm_limit(self) -> None: wait_time = time.time() - self._recent_requests[0][0] if wait_time < CACHE_RETENTION_TIME: - print(f"Request limit met, waiting {ceil(CACHE_RETENTION_TIME - wait_time)} seconds...") + logger.warning("Request limit met, waiting %d seconds...", ceil(CACHE_RETENTION_TIME - wait_time)) while time.time() - self._recent_requests[0][0] < CACHE_RETENTION_TIME: time.sleep(1) @@ -536,7 +625,7 @@ def _enforce_tpm_limit(self) -> None: while sum(out_tokens for _, out_tokens in self._recent_requests) >= self._tpm_limit: wait_time = time.time() - self._recent_requests[0][0] if wait_time < CACHE_RETENTION_TIME: - print(f"Token limit met, waiting {ceil(CACHE_RETENTION_TIME - wait_time)} seconds...") + logger.warning("Token limit met, waiting %d seconds...", ceil(CACHE_RETENTION_TIME - wait_time)) while time.time() - self._recent_requests[0][0] < CACHE_RETENTION_TIME: time.sleep(1) @@ -547,15 +636,12 @@ def generate_async( self, prompts: List[str], system_prompt: Optional[str] = None, - max_new_tokens: int = 2048, - temperature: Optional[float] = None, - top_p: Optional[float] = None, **kwargs ) -> List[GenericResponse]: """ Generate text using the specified backend in async mode. 
""" import asyncio - print("Generating in async mode...") + logger.debug("Generating %d prompts in async mode.", len(prompts)) # Set system prompt if not provided if self._system_prompt is not None and system_prompt is None: @@ -563,16 +649,13 @@ def generate_async( # Fall back to synchronous generation for non-async backends or single prompts if not self._async_mode or len(prompts) < 2 or self._backend != "vllm": - return [self.generate(p, system_prompt, max_new_tokens, temperature, top_p, **kwargs)[0] - for p in prompts] + return [self.generate(p, system_prompt, **kwargs)[0] for p in prompts] - # Use async vLLM generation for multiple prompts - batch = asyncio.run(self._generate_vllm_async( - prompts, system_prompt, max_new_tokens, temperature, top_p, **kwargs - )) + # Use async vLLM batch generation for multiple prompts + batch = asyncio.run(self._generate_vllm_async(prompts, system_prompt, **kwargs)) # Update token counts - self._update_token_counts(batch, len(batch)) + self._update_token_counts(batch) return batch @@ -585,34 +668,30 @@ def _check_limits(self) -> None: def _handle_generation_error(self, error: Exception, attempt: int) -> None: - """Handle generation errors with retry logic.""" - print(f"{type(error)} when generating response:") - print(error) - print(f"Attempt {attempt} of {MAX_ATTEMPTS}, retrying in {RETRY_DELAY} seconds...") - time.sleep(RETRY_DELAY) + """Handle generation errors with exponential backoff retry logic.""" + cooldown = BASE_COOLDOWN * (2 ** (attempt - 1)) + logger.warning("%s when generating response: %s — attempt %d of %d, retrying in %d seconds...", + type(error).__name__, error, attempt, MAX_ATTEMPTS, cooldown) + time.sleep(cooldown) if attempt == MAX_ATTEMPTS: raise RuntimeError("Max attempts reached, unable to generate response.") from error - def _update_token_counts(self, responses: List[GenericResponse], n: int) -> None: + def _update_token_counts(self, responses: List[GenericResponse]) -> None: """Update internal token and request counts.""" in_tokens = int(sum(r.prompt_tokens for r in responses)) out_tokens = int(sum(r.completion_tokens for r in responses)) self._input_token_count += in_tokens self._output_token_count += out_tokens - self._request_count += n + self._request_count += len(responses) def generate( self, prompt: str, system_prompt: Optional[str] = None, - max_new_tokens: int = 2048, - temperature: Optional[float] = None, - top_p: Optional[float] = None, - n: int = 1, **kwargs ) -> List[GenericResponse]: """ Generate text using the specified backend. 
@@ -635,9 +714,7 @@ def generate( try: # Generate response start_time = time.time() - responses = self._generator( - prompt, system_prompt, max_new_tokens, temperature, top_p, n, **kwargs - ) + responses = self._generator(prompt, system_prompt, **kwargs) # Add request to recent requests for rate limiting if self._rpm_limit is not None: @@ -646,7 +723,10 @@ def generate( self._recent_requests.append((start_time, out_tokens)) # Update token counts and return - self._update_token_counts(responses, n) + self._update_token_counts(responses) + logger.debug("Generation complete: %d prompt tokens, %d completion tokens.", + int(sum(r.prompt_tokens for r in responses)), + int(sum(r.completion_tokens for r in responses))) return responses except google.api_core.exceptions.GoogleAPIError as e: diff --git a/src/translate/naive/naive_translator.py b/src/translate/naive/naive_translator.py index 55a96b03..b906efdd 100644 --- a/src/translate/naive/naive_translator.py +++ b/src/translate/naive/naive_translator.py @@ -2,12 +2,15 @@ as much context in the prompt as possible. """ # std imports +import logging import os import sys import re import json from typing import Dict, Tuple, Union, Literal, List, Optional +logger = logging.getLogger("pareval-repo") + # tpl imports from alive_progress import alive_it @@ -21,8 +24,6 @@ # Constants DEFAULT_TERMINAL_COLS = 80 -DEFAULT_TEMPERATURE = 0.2 -DEFAULT_TOP_P = 0.95 DEFAULT_CHUNK_MAX_TOKENS = 1024 CODE_BLOCK_PATTERN = re.compile(r"```(?:[+\w]+)?\n(.*?)\n```", re.DOTALL) INTERACTIONS_DIR = "interactions" @@ -51,7 +52,12 @@ def __init__( enable_chunking: bool = False, log_interactions: bool = False, dry: bool = False, - hide_progress: bool = False + hide_progress: bool = False, + api_key: Optional[str] = None, + api_base_url: Optional[str] = None, + vllm_environment: Optional[str] = None, + vllm_yaml_config: Optional[str] = None, + vllm_keepalive_id: Optional[int] = None, ): # Validate inputs self._validate_inputs(input_repo, output_repos, src_model, dst_model, @@ -61,7 +67,12 @@ def __init__( dst_config, log_interactions, dry, hide_progress) GeneratorMixin.__init__(self, backend, llm_name, - system_prompt=self._get_system_prompt()) + system_prompt=self._get_system_prompt(), + api_key=api_key, + api_base_url=api_base_url, + vllm_environment=vllm_environment, + vllm_yaml_config=vllm_yaml_config, + vllm_keepalive_id=vllm_keepalive_id) if enable_chunking: self._chunk_agent = ChunkFileAgent(self, max_tokens=DEFAULT_CHUNK_MAX_TOKENS) @@ -128,6 +139,11 @@ def parse_args(args: 'Namespace') -> Dict[str, str]: # type: ignore # noqa: F82 "backend": args.naive_backend, "llm_name": args.naive_llm_name, "enable_chunking": args.naive_enable_chunking, + "api_key": args.api_key, + "api_base_url": args.api_base_url, + "vllm_environment": args.vllm_environment, + "vllm_yaml_config": args.vllm_yaml_config, + "vllm_keepalive_id": args.vllm_keepalive_id, } @@ -229,7 +245,7 @@ def _build_base_prompt(self, fname: os.PathLike, chunk: Optional[str], Formatted base prompt """ if chunk: - print(chunk) # Debug output for chunking + logger.debug("Chunk content:\n%s", chunk) return nc.CHUNK_PROMPT_TEMPLATE.format( src_model=self._src_model, dst_model=self._dst_model, @@ -361,7 +377,7 @@ def _postprocess(self, output: str) -> Optional[str]: """ match = CODE_BLOCK_PATTERN.search(output) if match is None: - print(f"No code block found in output:\n{output}") + logger.warning("No code block found in output:\n%s", output) return None return match.group(1) @@ -464,9 +480,9 @@ def _write_metadata(self, 
repo_path: os.PathLike) -> None: with open(exp_meta_fpath, 'w', encoding="UTF-8") as f: json.dump(exp_meta_dict, f, indent=4) - print(f"Wrote translation experiment metadata to {exp_meta_fpath}") + logger.debug("Wrote translation experiment metadata to %s", exp_meta_fpath) except (OSError, json.JSONEncodeError) as e: - print(f"Error writing metadata file: {e}") + logger.error("Error writing metadata file: %s", e) raise @@ -512,12 +528,7 @@ def _translate_file(self, fpath: os.PathLike, chunk: Optional[str] = None, self._handle_dry_run(prompt, fpath, output_fpaths) return - responses = self.generate( - prompt, - temperature=DEFAULT_TEMPERATURE, - top_p=DEFAULT_TOP_P, - n=len(self._output_paths) - ) + responses = [self.generate(prompt)[0] for _ in self._output_paths] self._process_responses(responses, prompt, fpath, output_fpaths, chunk, chunk_id) @@ -531,9 +542,9 @@ def _handle_dry_run(self, prompt: str, fpath: os.PathLike, fpath: The file path being translated output_fpaths: List of output file paths """ - print(prompt) - print(f"Skipped translation of {fpath} to " + - f"{output_fpaths[0]}..{output_fpaths[-1]} for dry run.") + logger.debug("Dry-run prompt:\n%s", prompt) + logger.info("Skipped translation of %s to %s..%s for dry run.", + fpath, output_fpaths[0], output_fpaths[-1]) def _process_responses(self, responses: List[GenericResponse], prompt: str, @@ -560,7 +571,7 @@ def _process_responses(self, responses: List[GenericResponse], prompt: str, # Process output output = self._postprocess(response.response) if output is None: - print(f"Failed to translate {fpath} to {output_fpath}") + logger.warning("Failed to translate %s to %s.", fpath, output_fpath) continue # Write to file @@ -587,25 +598,17 @@ def _write_translated_file(self, output_fpath: os.PathLike, with open(output_fpath, open_mode, encoding="UTF-8") as f: f.write(output) except OSError as e: - print(f"Error writing file {output_fpath}: {e}") + logger.error("Error writing file %s: %s", output_fpath, e) raise def _print_translation_status(self, fpath: os.PathLike, output_fpath: os.PathLike, chunk: Optional[str], chunk_id: int) -> None: - """Print translation status message. 
- - Args: - fpath: Original file path - output_fpath: Output file path - chunk: Optional chunk being translated - chunk_id: ID of the chunk - """ - print(f"Translated {fpath} to {output_fpath}", end="") + """Log translation status message.""" if chunk: - print(f" (chunk {chunk_id})") + logger.info("Translated %s to %s (chunk %d).", fpath, output_fpath, chunk_id) else: - print("") + logger.info("Translated %s to %s.", fpath, output_fpath) def translate(self) -> None: @@ -644,9 +647,9 @@ def _print_translation_start(self, num_translations: int, repo_paths: List of repository paths all_files: List of files to translate """ - print(f"Beginning {num_translations} batched translation(s) " + - f"starting from {repo_paths[0]} using {self._llm_name} with NaiveTranslator.") - print(f"Files to translate: {all_files}") + logger.info("Beginning %d batched translation(s) starting from %s using %s with NaiveTranslator.", + num_translations, repo_paths[0], self._llm_name) + logger.debug("Files to translate: %s", all_files) def _translate_single_file(self, fpath: os.PathLike) -> None: @@ -660,10 +663,10 @@ def _translate_single_file(self, fpath: os.PathLike) -> None: return # Handle chunking - print(f"Chunking file {fpath}...") + logger.debug("Chunking file %s...", fpath) source_code = self._input_repo.get_file_contents(rel_path=fpath) chunks = self._chunk_agent.chunk_file(source_code) - print(f"Chunked {fpath} into {len(chunks)} chunks.") + logger.debug("Chunked %s into %d chunk(s).", fpath, len(chunks)) if len(chunks) > 1: for i, chunk in enumerate(chunks): diff --git a/src/translate/repo.py b/src/translate/repo.py index 458ef1d7..4582be37 100644 --- a/src/translate/repo.py +++ b/src/translate/repo.py @@ -5,10 +5,13 @@ date: April 2024 """ # std imports +import logging import os from typing import Optional, List import json +logger = logging.getLogger("pareval-repo") + TASK_FILE = "translation_task.md" class Repo: diff --git a/src/translate/swe_agent/swe_agent_translator.py b/src/translate/swe_agent/swe_agent_translator.py index 9cb1839d..a0048352 100644 --- a/src/translate/swe_agent/swe_agent_translator.py +++ b/src/translate/swe_agent/swe_agent_translator.py @@ -1,6 +1,7 @@ """ Class that invokes SWE-agent to perform code translation. 
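The overall flow: write a translation task file, copy the source into a temporary
Git repository, run SWE-agent on it, apply the patch it produces, and save the
patched tree as the translated output.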
""" # std imports +import logging import os import shutil import subprocess @@ -11,6 +12,8 @@ from translator import Translator from repo import Repo +logger = logging.getLogger("pareval-repo") + class SWEAgentTranslator(Translator): """Translator that uses SWE-agent to perform code translation.""" @@ -108,26 +111,26 @@ def _execute_translation_workflow(self) -> None: self.initialize_temp_repo() if self.run_swe_agent(): - print("Saving translated output...") + logger.info("Saving translated output...") self.save_output(self._output_path) self.remove_unnecessary_output_files() self.write_experiment_metadata() else: - print("Translation failed.") + logger.error("Translation failed.") def generate_translation_task(self) -> None: """Generate the translation task file for SWE-agent.""" - print("Generating translation task...") + logger.info("Generating translation task...") translation_task = self._create_translation_task_content() try: with open(self._translation_task_path, "w", encoding="utf-8") as f: f.write(translation_task) - print(f"Translation task generated: {self._translation_task_path}") + logger.debug("Translation task written to %s.", self._translation_task_path) except IOError as e: - print(f"Error writing translation task: {e}") + logger.error("Error writing translation task: %s", e) raise def _create_translation_task_content(self) -> str: @@ -154,7 +157,7 @@ def _create_translation_task_content(self) -> str: def initialize_temp_repo(self) -> None: """Initialize the temporary repository and perform initial Git setup.""" - print("Initializing temporary Git repository...") + logger.info("Initializing temporary Git repository...") self._prepare_temp_directory() self._copy_source_to_temp() self._initialize_git_repo() @@ -162,7 +165,7 @@ def initialize_temp_repo(self) -> None: def _prepare_temp_directory(self) -> None: """Remove existing temp directory if it exists.""" if os.path.exists(self._temp_repo_path): - print("The temporary repository exists. 
Removing the repository...") + logger.debug("Temporary repository exists at %s; removing it.", self._temp_repo_path) shutil.rmtree(self._temp_repo_path) def _copy_source_to_temp(self) -> None: @@ -179,20 +182,20 @@ def _initialize_git_repo(self) -> None: def run_swe_agent(self) -> bool: """Run the SWE-agent command and apply the resulting patch.""" command = self._build_swe_agent_command() - print(f"Running SWE-agent command: {' '.join(command)}") + logger.info("Running SWE-agent command: %s", " ".join(command)) try: process = subprocess.run(command, text=True, cwd=self._temp_repo_path, check=True) if process.returncode == 0: - print("SWE-agent command executed successfully.") + logger.info("SWE-agent command executed successfully.") return self._apply_swe_agent_patch() else: - print(f"Command failed with return code {process.returncode}.") + logger.error("SWE-agent command failed with return code %d.", process.returncode) return False except Exception as e: - print(f"An error occurred: {e}") + logger.error("An error occurred running SWE-agent: %s", e) return False def _build_swe_agent_command(self) -> List[str]: @@ -208,13 +211,13 @@ def _build_swe_agent_command(self) -> List[str]: def _apply_swe_agent_patch(self) -> bool: """Find and apply the patch file generated by SWE-agent.""" - print("Applying patch...") + logger.info("Applying patch...") trajectories_dir = os.path.join(self._temp_repo_path, self.TRAJECTORIES_DIR) patch_file_path = self._find_patch_file(trajectories_dir) if patch_file_path is None: - print("Error: No patch file found in trajectories directory.") + logger.error("No patch file found in trajectories directory.") return False return self._apply_patch_file(patch_file_path) @@ -238,10 +241,10 @@ def _apply_patch_file(self, patch_file_path: str) -> bool: cwd=self._temp_repo_path, check=True ) - print("Patch applied successfully.") + logger.info("Patch applied successfully.") return True except (OSError, subprocess.CalledProcessError) as e: - print(f"Error applying patch: {e}") + logger.error("Error applying patch: %s", e) return False @@ -260,19 +263,19 @@ def save_output(self, output_dir: str) -> None: if os.path.exists(git_dir): shutil.rmtree(git_dir) except (OSError, shutil.Error) as e: - print(f"Error saving output: {e}") + logger.error("Error saving output: %s", e) raise def remove_unnecessary_output_files(self) -> None: """Remove unnecessary files (any .cu or .cuh files) from the output.""" - print(f"Cleaning the output repository: {self._output_path}") + logger.debug("Cleaning the output repository: %s", self._output_path) try: self._remove_files_by_extension(self._output_path, self.REMOVE_EXTENSIONS) - print(f"Finished cleaning the output repository: {self._output_path}") + logger.debug("Finished cleaning the output repository: %s", self._output_path) except OSError as e: - print(f"Error cleaning output files: {e}") + logger.error("Error cleaning output files: %s", e) raise def _remove_files_by_extension(self, directory: str, extensions: tuple) -> None: @@ -296,9 +299,9 @@ def write_experiment_metadata(self) -> None: with open(exp_meta_fpath, 'w', encoding='utf-8') as f: json.dump(metadata, f, indent=4) - print(f"Experiment metadata written to {exp_meta_fpath}.") + logger.debug("Experiment metadata written to %s.", exp_meta_fpath) except (OSError, json.JSONEncodeError) as e: - print(f"Error writing experiment metadata: {e}") + logger.error("Error writing experiment metadata: %s", e) raise def _create_experiment_metadata(self) -> Dict[str, Any]: @@ -318,12 +321,12 
@@ def _create_experiment_metadata(self) -> Dict[str, Any]: def cleanup_temp_repo(self) -> None: """Remove the temporary repository.""" - print("Cleaning up temporary repository...") + logger.info("Cleaning up temporary repository...") try: if os.path.exists(self._temp_repo_path): shutil.rmtree(self._temp_repo_path) - print("Temporary repository cleaned up.") + logger.debug("Temporary repository cleaned up.") except OSError as e: - print(f"Error cleaning up temporary repository: {e}") + logger.warning("Error cleaning up temporary repository: %s", e) # Don't raise here as this is cleanup code diff --git a/src/translate/top_down_agentic/chunk_agent.py b/src/translate/top_down_agentic/chunk_agent.py index 58726581..ba801677 100644 --- a/src/translate/top_down_agentic/chunk_agent.py +++ b/src/translate/top_down_agentic/chunk_agent.py @@ -4,10 +4,13 @@ that can be processed by LLMs within token limits. """ +import logging from typing import List, Union, Optional import os from math import ceil +logger = logging.getLogger("pareval-repo") + # Third-party imports from langchain_text_splitters import Language from langchain_text_splitters import RecursiveCharacterTextSplitter as rcts @@ -72,14 +75,14 @@ def chunk_file(self, source_code: str) -> List[str]: return [""] if self._is_too_long(source_code): - print("Chunking file...") + logger.debug("File exceeds token limit; chunking...") try: docs = self._splitter.create_documents([source_code]) chunks = [doc.page_content for doc in docs] - print(f"Split into {len(chunks)} chunks") + logger.debug("Split into %d chunks.", len(chunks)) return chunks except Exception as e: - print(f"Error chunking file: {e}") + logger.warning("Error chunking file: %s", e) # Fallback: return original code if chunking fails return [source_code] diff --git a/src/translate/top_down_agentic/context_agent.py b/src/translate/top_down_agentic/context_agent.py index ec662365..1dc464df 100644 --- a/src/translate/top_down_agentic/context_agent.py +++ b/src/translate/top_down_agentic/context_agent.py @@ -4,9 +4,12 @@ to help with the translation of subsequent files in the dependency chain. 
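Given already-translated dependency files, the agent asks the LLM for short
summaries (see get_contexts) that are injected into later translation prompts.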
""" +import logging import os from typing import List, Optional, Union from generator_mixin import GeneratorMixin + +logger = logging.getLogger("pareval-repo") from .dependency_agent import FileNode from .utils import read_file_safely @@ -42,7 +45,7 @@ def get_contexts(self, dependencies: List[FileNode], Returns: List of context strings for each output path """ - print("Extracting context for dependent files...") + logger.debug("Extracting context from %d dependent file(s)...", len(dependencies)) if not dependencies: return ["" for _ in self._output_paths] @@ -97,14 +100,10 @@ def _create_context_prompt(self, translated_codes: List[str], def _generate_contexts(self, prompts: List[str]) -> List[str]: """Generate context responses using the LLM.""" try: - response_obs = self._generator.generate_async( - prompts, - temperature=0.2, - top_p=0.95 - ) + response_obs = self._generator.generate_async(prompts) return [r.response.strip() if r.response else "" for r in response_obs] except Exception as e: - print(f"Error generating contexts: {e}") + logger.error("Error generating contexts: %s", e) return ["" for _ in prompts] diff --git a/src/translate/top_down_agentic/dependency_agent.py b/src/translate/top_down_agentic/dependency_agent.py index fbdf0418..e09e3f97 100644 --- a/src/translate/top_down_agentic/dependency_agent.py +++ b/src/translate/top_down_agentic/dependency_agent.py @@ -13,6 +13,8 @@ import logging import os import subprocess + +logger = logging.getLogger("pareval-repo") from glob import glob from typing import List, Optional, Union, Callable from graphlib import TopologicalSorter, CycleError @@ -193,7 +195,7 @@ def get_cpp_source_file_dependencies(self, source_file: Union[str, os.PathLike], Returns: List of dependencies or None if failed """ - print(f"Getting dependencies for {source_file} using clang -MM") + logger.debug("Getting dependencies for %s using clang -MM", source_file) compiler = self._get_compiler_for_file(source_file) if not compiler: @@ -230,7 +232,7 @@ def _build_clang_command(self, compiler: str, source_file: Union[str, os.PathLik for d in include_dirs: cmd.append(f"-I{d}") except OSError as e: - logging.warning(f"Could not list directories in {repo_path}: {e}") + logger.warning("Could not list directories in %s: %s", repo_path, e) return cmd @@ -247,7 +249,7 @@ def _run_clang_command(self, cmd: List[str], repo_path: Union[str, os.PathLike]) ) if run_result.returncode != 0: - print(f"Error running command: {run_result.stderr}") + logger.warning("clang -MM failed: %s", run_result.stderr.strip()) return None deps = run_result.stdout.strip() @@ -261,7 +263,7 @@ def _run_clang_command(self, cmd: List[str], repo_path: Union[str, os.PathLike]) return [] except Exception as e: - logging.error(f"Error running clang command: {e}") + logger.error("Error running clang command: %s", e) return None @@ -278,10 +280,10 @@ def get_source_file_dependencies_with_llm(self, source_file: Union[str, os.PathL Returns: List of dependencies or None if failed """ - print(f"Getting dependencies for {source_file} using LLM") + logger.debug("Getting dependencies for %s using LLM", source_file) if source_files is None: - logging.warning("No source files provided to LLM") + logger.warning("No source files provided to LLM dependency analysis.") source_files = [""] try: @@ -298,7 +300,7 @@ def get_source_file_dependencies_with_llm(self, source_file: Union[str, os.PathL return deps except Exception as e: - logging.error(f"Error getting LLM dependencies for {source_file}: {e}") + logger.error("Error 
getting LLM dependencies for %s: %s", source_file, e) return None @@ -309,7 +311,7 @@ def _read_file_lines(self, source_file: Union[str, os.PathLike], num_lines: int) lines = f.readlines()[:num_lines] return "```\n" + "".join(lines) + "\n```" except Exception as e: - logging.error(f"Error reading file {source_file}: {e}") + logger.error("Error reading file %s: %s", source_file, e) return "```\n```" @@ -332,7 +334,7 @@ def _parse_llm_dependencies(self, response: str) -> List[str]: """Parse the LLM response to extract dependencies.""" deps = response.strip().split("\n") deps = [dep.strip() for dep in deps if dep.strip()] - print(f"LLM identified dependencies: {format_dependency_list(deps)}") + logger.debug("LLM identified dependencies: %s", format_dependency_list(deps)) return deps @@ -349,7 +351,7 @@ def _log_llm_interaction(self, source_file: Union[str, os.PathLike], f.write(f"Prompt:\n{prompt}\n") f.write(f"Dependencies:\n{format_dependency_list(deps)}\n\n") except Exception as e: - logging.warning(f"Could not log interaction to {path}: {e}") + logger.warning("Could not log interaction to %s: %s", path, e) def get_source_file_dependencies(self, source_file: Union[str, os.PathLike], @@ -376,7 +378,7 @@ def get_source_file_dependencies(self, source_file: Union[str, os.PathLike], if deps is not None: return [os.path.join(repo_path, dep) for dep in deps] - logging.warning("Could not get dependencies for %s", source_file) + logger.warning("Could not get dependencies for %s", source_file) return None @@ -402,8 +404,8 @@ def construct_dependency_graph(self, repo_path: Union[str, os.PathLike]) -> List # Construct the graph roots = self._construct_file_nodes(sorted_files, dependencies) - print(f"Constructed dependency graph with {len(roots)} roots") - print(f"Roots: {[root.rel_path for root in roots]}") + logger.debug("Constructed dependency graph with %d root(s): %s", + len(roots), [root.rel_path for root in roots]) return roots @@ -413,8 +415,8 @@ def _collect_repository_files(self, repo_path: Union[str, os.PathLike]) -> tuple build_files = self.get_all_build_files(repo_path) all_files = list(set(source_files + build_files)) - print(f"Source files: {source_files}") - print(f"Build files: {build_files}") + logger.debug("Source files: %s", source_files) + logger.debug("Build files: %s", build_files) return source_files, build_files, all_files diff --git a/src/translate/top_down_agentic/top_down_agentic.py b/src/translate/top_down_agentic/top_down_agentic.py index 6c4f8856..9d836ea8 100644 --- a/src/translate/top_down_agentic/top_down_agentic.py +++ b/src/translate/top_down_agentic/top_down_agentic.py @@ -11,12 +11,15 @@ Date: November 2024 """ +import logging import os import sys import json from typing import Dict, Literal, Optional, List, Union, Tuple import re +logger = logging.getLogger("pareval-repo") + # Local imports sys.path.append(os.path.join(os.path.dirname(__file__), "..")) from translator import Translator @@ -52,7 +55,12 @@ def __init__( backend: Literal["openai", "gemini", "hf", "vllm", "local"] = "openai", log_interactions: bool = False, dry: bool = False, - hide_progress: bool = False + hide_progress: bool = False, + api_key: Optional[str] = None, + api_base_url: Optional[str] = None, + vllm_environment: Optional[str] = None, + vllm_yaml_config: Optional[str] = None, + vllm_keepalive_id: Optional[int] = None, ): """Initialize the top-down agent translator. 
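        Example (illustrative; positional arguments mirror the Translator base class):
            translator = TopDownAgenticTranslator(
                input_repo, output_repos, "cuda", "kokkos", dst_config,
                llm_name="Qwen/Qwen3-Coder-Next", backend="vllm",
                vllm_yaml_config="config/perlmutter-vllm-qwen.yaml",
            )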
@@ -67,6 +75,11 @@ def __init__( log_interactions: Whether to log LLM interactions dry: Whether to run in dry-run mode hide_progress: Whether to hide progress bars + api_key: API key for the LLM backend + api_base_url: Base URL for the LLM backend API endpoint + vllm_environment: Path to the Python venv for launching a vLLM server + vllm_yaml_config: Path to a vLLM YAML configuration file + vllm_keepalive_id: If set, write vLLM server PID to a file with this ID """ super().__init__(input_repo, output_repos, src_model, dst_model, dst_config, log_interactions, dry, hide_progress) @@ -76,7 +89,12 @@ def __init__( dst_model=self._dst_model) GeneratorMixin.__init__(self, backend, llm_name, system_prompt=self._system_prompt, - async_mode=True) + async_mode=True, + api_key=api_key, + api_base_url=api_base_url, + vllm_environment=vllm_environment, + vllm_yaml_config=vllm_yaml_config, + vllm_keepalive_id=vllm_keepalive_id) self._interactions_paths = self._setup_interaction_logging() self._dependency_agent = self._create_dependency_agent() @@ -142,7 +160,12 @@ def parse_args(args: 'Namespace') -> Dict[str, str]: # type: ignore # noqa: F821 """ return { "backend": args.top_down_agentic_backend, - "llm_name": args.top_down_agentic_llm_name + "llm_name": args.top_down_agentic_llm_name, + "api_key": args.api_key, + "api_base_url": args.api_base_url, + "vllm_environment": args.vllm_environment, + "vllm_yaml_config": args.vllm_yaml_config, + "vllm_keepalive_id": args.vllm_keepalive_id, } @@ -206,7 +229,7 @@ def _write_file(self, rel_path: Union[str, os.PathLike], contents: str, idx: int success = write_file_safely(output_file_path, contents) if success: - print(f"Wrote file {output_file_path}") + logger.debug("Wrote file %s.", output_file_path) return success @@ -235,11 +258,11 @@ def _write_metadata(self, repo_path: Union[str, os.PathLike]) -> bool: success = write_file_safely(exp_meta_fpath, json.dumps(exp_meta_dict, indent=4)) if success: - print(f"Wrote translation experiment metadata to {exp_meta_fpath}") + logger.debug("Wrote translation experiment metadata to %s.", exp_meta_fpath) return success except Exception as e: - print(f"Error writing metadata: {e}") + logger.error("Error writing metadata: %s", e) return False @@ -266,12 +289,12 @@ def _log_interaction(self, prompts: List[str], responses: List[str], f.write("RESPONSE:\n") f.write(responses[i] + "\n\n") except Exception as e: - print(f"Error logging interaction to {output_path}: {e}") + logger.error("Error logging interaction to %s: %s", output_path, e) def translate(self): """Use the top-down method to translate the entire repository.""" - print(f"Constructing dependency graph on {self._input_repo.path}...") + logger.info("Constructing dependency graph on %s...", self._input_repo.path) # Build dependency graph and prepare for translation dep_graph = self._dependency_agent.construct_dependency_graph(self._input_repo.path) @@ -316,7 +339,7 @@ def _translate_node(self, node: FileNode, graph: Optional[List[FileNode]] = None graph: Optional dependency graph for build files tree: Optional file tree for build files """ - print(f"Translating file {node.rel_path}...") + logger.info("Translating file %s...", node.rel_path) # Get source code and context source_code = self._read_file(node.rel_path) @@ -329,6 +352,7 @@ def _translate_node(self, node: FileNode, graph: Optional[List[FileNode]] = None # Write translations to output files self._write_translations(node, translations, trigger_rename) + logger.debug("Completed translation of %s.", 
node.rel_path) def _translate_file_content(self, source_code: str, contexts: List[str], @@ -347,7 +371,7 @@ def _translate_single_chunk(self, chunk: str, contexts: List[str], node: FileNode, graph: Optional[List[FileNode]] = None, tree: Optional[str] = None) -> Tuple[List[str], Optional[str]]: """Translate a single chunk of code.""" - print("Requesting whole file translation...") + logger.debug("Requesting whole-file translation.") responses, trigger_rename = self._get_translations(contexts, chunk, node, graph, tree) translations = [extract_code_block(response) for response in responses] return translations, trigger_rename @@ -362,7 +386,7 @@ def _translate_multiple_chunks(self, chunks: List[str], contexts: List[str], trigger_rename = None for i, chunk in enumerate(chunks): - print(f"Requesting chunk translation... [{i + 1}/{len(chunks)}]") + logger.debug("Requesting chunk translation [%d/%d].", i + 1, len(chunks)) responses, trigger_rename = self._get_translations( contexts, chunk, node, graph, tree, prev_chunks=prev_chunks ) @@ -529,7 +553,7 @@ def _add_build_addendum(self, prompts: List[str], file: FileNode, def _generate_translations(self, prompts: List[str]) -> Tuple[List[str], List[Union[str, None]]]: """Generate translations using the LLM.""" - response_obs = self.generate_async(prompts, temperature=0.2, top_p=0.95) + response_obs = self.generate_async(prompts) responses = [r.response for r in response_obs] reasonings = [r.reasoning for r in response_obs] return responses, reasonings diff --git a/src/translate/top_down_agentic/utils.py b/src/translate/top_down_agentic/utils.py index 7a891f54..0b2cb88a 100644 --- a/src/translate/top_down_agentic/utils.py +++ b/src/translate/top_down_agentic/utils.py @@ -4,11 +4,14 @@ used across the different agent classes to reduce code duplication. 
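For example, extract_code_block() pulls the body out of a fenced code block in an
LLM response, returning None (with a warning) when no block is found.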
""" +import logging import os import re from typing import List, Optional, Union, Dict, Any from enum import Enum +logger = logging.getLogger("pareval-repo") + class FileType(Enum): """An enumeration of file types.""" @@ -68,7 +71,7 @@ def extract_code_block(output: str) -> Optional[str]: CODE_BLOCK_PATTERN = re.compile(r"```(?:[+\w]+)?\n(.*?)\n```", re.DOTALL) match = CODE_BLOCK_PATTERN.search(output) if match is None: - print(f"No code block found in output:\n{output}") + logger.warning("No code block found in output:\n%s", output) return None return match.group(1) @@ -88,7 +91,7 @@ def read_file_safely(file_path: Union[str, os.PathLike], encoding: str = "UTF-8" except FileNotFoundError: return "" except Exception as e: - print(f"Error reading file {file_path}: {e}") + logger.error("Error reading file %s: %s", file_path, e) return "" @@ -100,7 +103,7 @@ def write_file_safely(file_path: Union[str, os.PathLike], contents: str, encodin f.write(contents) return True except Exception as e: - print(f"Error writing file {file_path}: {e}") + logger.error("Error writing file %s: %s", file_path, e) return False diff --git a/src/translate/translate.py b/src/translate/translate.py index 87a59df2..96094c09 100755 --- a/src/translate/translate.py +++ b/src/translate/translate.py @@ -8,6 +8,7 @@ """ # std imports from argparse import ArgumentParser +import logging import os import json import shutil @@ -19,22 +20,62 @@ from top_down_agentic.top_down_agentic import TopDownAgenticTranslator from swe_agent.swe_agent_translator import SWEAgentTranslator +logger = logging.getLogger("pareval-repo") + + def get_args(): parser = ArgumentParser(description=__doc__) - parser.add_argument("-i", "--input", type=str, required=True, help="Path to the input source code repository.") - parser.add_argument("-o", "--output", type=str, required=True, help="Path to the output source code repository.") - parser.add_argument("-c", "--config", type=str, required=True, help="Path to translation destination model configuration file containing prompt fill-ins.") - parser.add_argument("-f", "--force-overwrite", action="store_true", help="Force overwrite of existing output directory.") - parser.add_argument("--method", choices=["naive", "top-down-agentic", "swe-agent"], required=True, help="The translation method to use.") - parser.add_argument("--src-model", type=str, required=True, help="The source execution model.") - parser.add_argument("--dst-model", type=str, required=True, help="The destination execution model.") - parser.add_argument("--output-id", type=int, required=True, help="The integer ID of the output, used to count repeat instances of the same translation configuration.") - parser.add_argument("-n", "--num-translations", type=int, default=1, help="The number of translations to generate. 
Currently only supported for OpenAI and vLLM with naive, will write to directories numbered from output-id through output-id + num-translations - 1.") - parser.add_argument("--app-name", type=str, help="The name of the application being translated.") - parser.add_argument("--dry", "-d", action="store_true", help="Dry run the translation.") - parser.add_argument("--log-interactions", action="store_true", help="Log the raw LLM outputs to a text file.") - parser.add_argument("--hide-progress", action="store_true", help="Hide the progress bar.") - parser.add_argument("--tar-outputs", action="store_true", help="Create a tarball of the output directories.") + parser.add_argument("-i", "--input", type=str, required=True, + help="Path to the input source code repository.") + parser.add_argument("-o", "--output", type=str, required=True, + help="Path to the output source code repository.") + parser.add_argument("-c", "--config", type=str, required=True, + help="Path to translation destination model configuration file containing " + "prompt fill-ins.") + parser.add_argument("-f", "--force-overwrite", action="store_true", + help="Force overwrite of existing output directory.") + parser.add_argument("--method", choices=["naive", "top-down-agentic", "swe-agent"], + required=True, help="The translation method to use.") + parser.add_argument("--src-model", type=str, required=True, + help="The source execution model.") + parser.add_argument("--dst-model", type=str, required=True, + help="The destination execution model.") + parser.add_argument("--output-id", type=int, required=True, + help="The integer ID of the output, used to count repeat instances of the " + "same translation configuration.") + parser.add_argument("-n", "--num-translations", type=int, default=1, + help="The number of translations to generate. 
Currently only supported "
+                        "for the OpenAI and vLLM backends with the naive method; writes to directories "
+                        "numbered from output-id through output-id + num-translations - 1, inclusive.")
+    parser.add_argument("--app-name", type=str,
+                        help="The name of the application being translated.")
+    parser.add_argument("--dry", "-d", action="store_true",
+                        help="Dry run the translation.")
+    parser.add_argument("--log-interactions", action="store_true",
+                        help="Log the raw LLM outputs to a text file.")
+    parser.add_argument("--hide-progress", action="store_true",
+                        help="Hide the progress bar.")
+    parser.add_argument("--tar-outputs", action="store_true",
+                        help="Create a tarball of the output directories.")
+    parser.add_argument("-l", "--log-level", type=str, default="INFO",
+                        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
+                        help="Logging level (default: INFO).")
+
+    # GeneratorMixin arguments shared across all LLM-based translation methods
+    parser.add_argument("--api-key", type=str, default=None,
+                        help="API key for the LLM backend, overriding any backend-specific "
+                             "environment variable.")
+    parser.add_argument("--api-base-url", type=str, default=None,
+                        help="Base URL for the LLM backend API endpoint.")
+    parser.add_argument("--vllm-environment", type=str, default=None,
+                        help="Path to the Python virtual environment to use when launching a vLLM "
+                             "server.")
+    parser.add_argument("--vllm-yaml-config", type=str, default=None,
+                        help="Path to a vLLM YAML configuration file passed to the server via "
+                             "--config.")
+    parser.add_argument("--vllm-keepalive-id", type=str, default=None,
+                        help="If set, write the vLLM server PID to a file with this ID instead of "
+                             "terminating the server on exit.")
 
     # subgroup of arguments for the naive translation method
     naive_args = parser.add_argument_group("naive translation method")
@@ -63,6 +104,14 @@ def get_translator_cls(method: str):
 
 def main():
     args = get_args()
+    # Configure the package logger
+    _handler = logging.StreamHandler()
+    _handler.setFormatter(logging.Formatter(
+        "%(asctime)s [%(levelname)s] %(message)s", datefmt="%H:%M:%S"
+    ))
+    logger.addHandler(_handler)
+    logger.setLevel(getattr(logging, args.log_level))
+
     # check if the input directory exists
     if not os.path.exists(args.input):
         raise FileNotFoundError(f"Input directory {args.input} not found.")
@@ -125,7 +174,7 @@ def create_tarball(output_dir):
         if os.path.exists(output_dir):
             create_tarball(output_dir)
         else:
-            print(f"Output directory {output_dir} does not exist. Skipping tarball creation.")
+            logger.warning("Output directory %s does not exist. Skipping tarball creation.", output_dir)
 
     # translator implements GeneratorMixin, then call print_stats
     if hasattr(translator, "print_stats"):
diff --git a/src/translate/translator.py b/src/translate/translator.py
index 57d0e79f..faa0969e 100644
--- a/src/translate/translator.py
+++ b/src/translate/translator.py
@@ -6,9 +6,12 @@
 """
 # std imports
 from abc import ABC, abstractmethod
+import logging
 import os
 from typing import List
 
+logger = logging.getLogger("pareval-repo")
+
 # local imports
 from repo import Repo
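
The `--vllm-keepalive-id` flag is the glue between `translate.py` and the Slurm runner below: the first invocation launches the vLLM server and records its PID under the given ID, and later invocations with the same ID can reuse the running server instead of relaunching it. `GeneratorMixin`'s actual implementation is not part of this diff; the following is only a sketch of the PID-file pattern the flag's help text describes, with the helper name and the PID-file path invented for illustration:

```python
# Sketch only -- GeneratorMixin's real logic is not shown in this diff.
# get_or_launch_vllm and the /tmp PID-file path are hypothetical.
import os
import subprocess

def get_or_launch_vllm(keepalive_id: str, launch_cmd: list[str]) -> int:
    pid_file = f"/tmp/vllm_server_{keepalive_id}.pid"
    if os.path.exists(pid_file):
        pid = int(open(pid_file).read().strip())
        try:
            os.kill(pid, 0)      # signal 0 only checks the process exists
            return pid           # server already up: reuse it
        except OSError:
            os.remove(pid_file)  # stale PID file: fall through and relaunch
    proc = subprocess.Popen(launch_cmd)
    with open(pid_file, "w") as f:
        f.write(str(proc.pid))
    return proc.pid              # caller must not terminate this PID on exit
```

Under this pattern, all `num_translations` calls within one array task share a single server launch, which is why the runner below derives one keepalive ID per task.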
diff --git a/translate-runner.sh b/translate-runner.sh
new file mode 100644
index 00000000..0d3f5a2a
--- /dev/null
+++ b/translate-runner.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+# Runner for translate-sweep.sbatch job array.
+# Each array task handles one (method, translate_pair, app) combination,
+# running num_translations=20 translate.py calls against a shared vLLM server.
+#
+# Source this from translate-sweep.sbatch, then call run_translate_array_task.
+# Requires SLURM_ARRAY_TASK_ID to be set (provided automatically by Slurm).
+
+set -euo pipefail
+
+run_translate_array_task() {
+    local task_id="${SLURM_ARRAY_TASK_ID:-0}"
+
+    # Task index decomposition — lists are defined in translate-sweep.sbatch.
+    local -a app_names
+    local -a translate_pairs
+    local -a methods
+    read -ra app_names <<< "${TRANSLATE_APPS:?TRANSLATE_APPS not set}"
+    read -ra translate_pairs <<< "${TRANSLATE_PAIRS:?TRANSLATE_PAIRS not set}"
+    read -ra methods <<< "${TRANSLATE_METHODS:?TRANSLATE_METHODS not set}"
+    local n_apps=${#app_names[@]}
+    local n_pairs=${#translate_pairs[@]}
+    local num_translations="${TRANSLATE_NUM:?TRANSLATE_NUM not set}"
+
+    local method_idx=$(( task_id / (n_pairs * n_apps) ))
+    local pair_idx=$(( (task_id / n_apps) % n_pairs ))
+    local app_idx=$(( task_id % n_apps ))
+
+    local method="${methods[$method_idx]}"
+    local translate_pair="${translate_pairs[$pair_idx]}"
+    local app_name="${app_names[$app_idx]}"
+    local src_model
+    local dst_model
+    src_model=$(echo "$translate_pair" | cut -d',' -f1)
+    dst_model=$(echo "$translate_pair" | cut -d',' -f2)
+
+    echo "Task ${task_id}: method=${method} src=${src_model} dst=${dst_model} app=${app_name}"
+
+    # Per-task isolated cache dirs in node-local memory (/dev/shm).
+    # Each array task gets its own slot so concurrent tasks don't collide.
+    local lmem_cache="/dev/shm/${USER}/.cache"
+    mkdir -p "$lmem_cache"
+    local slot="${task_id}"
+    export TORCHINDUCTOR_CACHE_DIR="${lmem_cache}/torchinductor_${SLURM_JOB_ID}_${slot}"
+    export VLLM_CACHE_ROOT="${lmem_cache}/vllm_${SLURM_JOB_ID}_${slot}"
+
+    # A keepalive ID unique to this array task causes GeneratorMixin to write a
+    # PID file for the vLLM server so it persists across all num_translations calls.
+    local keepalive_id="${SLURM_JOB_ID}_${slot}"
+
+    for i in $(seq 0 $((num_translations - 1))); do
+        # Backend flags are passed for both methods; translate.py parses them
+        # all and the selected --method reads only its own.
+        python src/translate/translate.py \
+            -i "targets/${app_name}/${src_model}/" \
+            -o ../restate-results/ \
+            -c "targets/${app_name}/${dst_model}/" \
+            --method "${method}" \
+            --src-model "${src_model}" \
+            --dst-model "${dst_model}" \
+            -n 1 \
+            --output-id "${i}" \
+            --app-name "${app_name}" \
+            --vllm-environment ../serve/.venv/ \
+            --vllm-yaml-config config/perlmutter-vllm-oss.yaml \
+            --vllm-keepalive-id "${keepalive_id}" \
+            --naive-backend vllm \
+            --naive-llm-name openai/gpt-oss-120b \
+            --top-down-agentic-backend vllm \
+            --top-down-agentic-llm-name openai/gpt-oss-120b
+    done
+}
diff --git a/translate-sweep.sbatch b/translate-sweep.sbatch
new file mode 100644
index 00000000..88bf1ab4
--- /dev/null
+++ b/translate-sweep.sbatch
@@ -0,0 +1,31 @@
+#!/bin/bash
+#SBATCH -q regular
+#SBATCH -A m5083
+#SBATCH -t 02:00:00
+#SBATCH -N 1
+#SBATCH -n 1
+#SBATCH -c 16
+#SBATCH -C "gpu&hbm80g"
+#SBATCH --gpus-per-node 4
+#SBATCH --array=0-35
+#SBATCH -o translate-sweep-%A-%a.out
+#SBATCH -e translate-sweep-%A-%a.out
+
+set -euo pipefail
+
+# Sweep configuration — edit here to change what the array tests.
+# len(TRANSLATE_METHODS) * len(TRANSLATE_PAIRS) * len(TRANSLATE_APPS) must equal
+# the number of array tasks, i.e. the --array bound above plus one (here 2*3*6 = 36).
+export TRANSLATE_APPS="nanoXOR microXORh microXOR XSBench SimpleMOC-kernel llm.c"
+export TRANSLATE_PAIRS="cuda,openmp-offload cuda,kokkos openmp-threads,openmp-offload"
+export TRANSLATE_METHODS="naive top-down-agentic"
+export TRANSLATE_NUM=20
+
+module load python/3.12 cudatoolkit/12.9 cudnn gcc-native/13.2
+export CC=gcc CXX=g++
+
+cd "${SLURM_SUBMIT_DIR}"
+. .venv/bin/activate
+
+. translate-runner.sh
+run_translate_array_task
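
The index decomposition in `run_translate_array_task` maps a flat array-task ID onto the (method, pair, app) triple with the app varying fastest. A quick Python rendering of the same arithmetic, using the sweep's actual lists, confirms that 2 methods x 3 pairs x 6 apps fills exactly the 36 task IDs 0 through 35:

```python
# Mirrors the bash arithmetic in run_translate_array_task; app varies fastest.
methods = ["naive", "top-down-agentic"]
pairs = ["cuda,openmp-offload", "cuda,kokkos", "openmp-threads,openmp-offload"]
apps = ["nanoXOR", "microXORh", "microXOR", "XSBench", "SimpleMOC-kernel", "llm.c"]

n_tasks = len(methods) * len(pairs) * len(apps)  # 2 * 3 * 6 = 36 -> --array=0-35
for task_id in range(n_tasks):
    method = methods[task_id // (len(pairs) * len(apps))]
    pair = pairs[(task_id // len(apps)) % len(pairs)]
    app = apps[task_id % len(apps)]
    if task_id in (0, n_tasks - 1):
        print(task_id, method, pair, app)
# 0 naive cuda,openmp-offload nanoXOR
# 35 top-down-agentic openmp-threads,openmp-offload llm.c
```

The standalone `translate.sbatch` below runs the same sweep, but serially in one job with nested loops rather than as an array.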
diff --git a/translate.sbatch b/translate.sbatch
new file mode 100644
index 00000000..801968ab
--- /dev/null
+++ b/translate.sbatch
@@ -0,0 +1,49 @@
+#!/bin/bash
+#SBATCH -q regular
+#SBATCH -A m5083
+#SBATCH -t 02:00:00
+#SBATCH -N 1
+#SBATCH -n 1
+#SBATCH -c 16
+#SBATCH -C "gpu&hbm80g"
+#SBATCH --gpus-per-node 4
+#SBATCH -o eg-translate-%j.out
+#SBATCH -e eg-translate-%j.out
+
+set -euo pipefail
+
+module load python/3.12 cudatoolkit/12.9 cudnn gcc-native/13.2
+export CC=gcc CXX=g++
+. .venv/bin/activate
+
+app_names=("nanoXOR" "microXORh" "microXOR" "XSBench" "SimpleMOC-kernel" "llm.c")
+translate_pairs=("cuda,openmp-offload" "cuda,kokkos" "openmp-threads,openmp-offload")
+methods=("naive" "top-down-agentic")
+num_translations=20
+
+for method in "${methods[@]}"; do
+    for translate_pair in "${translate_pairs[@]}"; do
+        src_model=$(echo "$translate_pair" | cut -d',' -f1)
+        dst_model=$(echo "$translate_pair" | cut -d',' -f2)
+        for app_name in "${app_names[@]}"; do
+            for i in $(seq 0 $((num_translations - 1))); do
+                python src/translate/translate.py \
+                    -i "targets/${app_name}/${src_model}/" \
+                    -o ../restate-results/ \
+                    -c "targets/${app_name}/${dst_model}/" \
+                    --method "${method}" \
+                    --src-model "${src_model}" \
+                    --dst-model "${dst_model}" \
+                    -n 1 \
+                    --output-id "${i}" \
+                    --app-name "${app_name}" \
+                    --vllm-environment ../serve/.venv/ \
+                    --vllm-yaml-config config/perlmutter-vllm-oss.yaml \
+                    --naive-backend vllm \
+                    --naive-llm-name openai/gpt-oss-120b \
+                    --top-down-agentic-backend vllm \
+                    --top-down-agentic-llm-name openai/gpt-oss-120b
+            done
+        done
+    done
+done
diff --git a/uv.lock b/uv.lock
index 5e80588a..38c5ce97 100644
--- a/uv.lock
+++ b/uv.lock
@@ -780,6 +780,7 @@ dependencies = [
     { name = "langchain-text-splitters" },
     { name = "openai" },
     { name = "pandas" },
+    { name = "tiktoken" },
 ]
 
 [package.metadata]
@@ -790,6 +791,7 @@ requires-dist = [
     { name = "langchain-text-splitters", specifier = ">=0.3.11" },
     { name = "openai", specifier = ">=1.107.3" },
     { name = "pandas", specifier = ">=2.3.2" },
+    { name = "tiktoken", specifier = ">=0.9.0" },
 ]
 
 [[package]]
@@ -984,6 +986,110 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" },
 ]
 
+[[package]]
+name = "regex"
+version = "2026.2.28"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/8b/71/41455aa99a5a5ac1eaf311f5d8efd9ce6433c03ac1e0962de163350d0d97/regex-2026.2.28.tar.gz", hash = "sha256:a729e47d418ea11d03469f321aaf67cdee8954cde3ff2cf8403ab87951ad10f2", size = 415184, upload-time = "2026-02-28T02:19:42.792Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/04/db/8cbfd0ba3f302f2d09dd0019a9fcab74b63fee77a76c937d0e33161fb8c1/regex-2026.2.28-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e621fb7c8dc147419b28e1702f58a0177ff8308a76fa295c71f3e7827849f5d9", size = 488462, upload-time = "2026-02-28T02:16:22.616Z" },
+    { url = "https://files.pythonhosted.org/packages/5d/10/ccc22c52802223f2368731964ddd117799e1390ffc39dbb31634a83022ee/regex-2026.2.28-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0d5bef2031cbf38757a0b0bc4298bb4824b6332d28edc16b39247228fbdbad97", size = 290774, upload-time = "2026-02-28T02:16:23.993Z" },
+    { url = 
"https://files.pythonhosted.org/packages/62/b9/6796b3bf3101e64117201aaa3a5a030ec677ecf34b3cd6141b5d5c6c67d5/regex-2026.2.28-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bcb399ed84eabf4282587ba151f2732ad8168e66f1d3f85b1d038868fe547703", size = 288724, upload-time = "2026-02-28T02:16:25.403Z" }, + { url = "https://files.pythonhosted.org/packages/9c/02/291c0ae3f3a10cea941d0f5366da1843d8d1fa8a25b0671e20a0e454bb38/regex-2026.2.28-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7c1b34dfa72f826f535b20712afa9bb3ba580020e834f3c69866c5bddbf10098", size = 791924, upload-time = "2026-02-28T02:16:26.863Z" }, + { url = "https://files.pythonhosted.org/packages/0f/57/f0235cc520d9672742196c5c15098f8f703f2758d48d5a7465a56333e496/regex-2026.2.28-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:851fa70df44325e1e4cdb79c5e676e91a78147b1b543db2aec8734d2add30ec2", size = 860095, upload-time = "2026-02-28T02:16:28.772Z" }, + { url = "https://files.pythonhosted.org/packages/b3/7c/393c94cbedda79a0f5f2435ebd01644aba0b338d327eb24b4aa5b8d6c07f/regex-2026.2.28-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:516604edd17b1c2c3e579cf4e9b25a53bf8fa6e7cedddf1127804d3e0140ca64", size = 906583, upload-time = "2026-02-28T02:16:30.977Z" }, + { url = "https://files.pythonhosted.org/packages/2c/73/a72820f47ca5abf2b5d911d0407ba5178fc52cf9780191ed3a54f5f419a2/regex-2026.2.28-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e7ce83654d1ab701cb619285a18a8e5a889c1216d746ddc710c914ca5fd71022", size = 800234, upload-time = "2026-02-28T02:16:32.55Z" }, + { url = "https://files.pythonhosted.org/packages/34/b3/6e6a4b7b31fa998c4cf159a12cbeaf356386fbd1a8be743b1e80a3da51e4/regex-2026.2.28-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2791948f7c70bb9335a9102df45e93d428f4b8128020d85920223925d73b9e1", size = 772803, upload-time = "2026-02-28T02:16:34.029Z" }, + { url = "https://files.pythonhosted.org/packages/10/e7/5da0280c765d5a92af5e1cd324b3fe8464303189cbaa449de9a71910e273/regex-2026.2.28-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:03a83cc26aa2acda6b8b9dfe748cf9e84cbd390c424a1de34fdcef58961a297a", size = 781117, upload-time = "2026-02-28T02:16:36.253Z" }, + { url = "https://files.pythonhosted.org/packages/76/39/0b8d7efb256ae34e1b8157acc1afd8758048a1cf0196e1aec2e71fd99f4b/regex-2026.2.28-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ec6f5674c5dc836994f50f1186dd1fafde4be0666aae201ae2fcc3d29d8adf27", size = 854224, upload-time = "2026-02-28T02:16:38.119Z" }, + { url = "https://files.pythonhosted.org/packages/21/ff/a96d483ebe8fe6d1c67907729202313895d8de8495569ec319c6f29d0438/regex-2026.2.28-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:50c2fc924749543e0eacc93ada6aeeb3ea5f6715825624baa0dccaec771668ae", size = 761898, upload-time = "2026-02-28T02:16:40.333Z" }, + { url = "https://files.pythonhosted.org/packages/89/bd/d4f2e75cb4a54b484e796017e37c0d09d8a0a837de43d17e238adf163f4e/regex-2026.2.28-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:ba55c50f408fb5c346a3a02d2ce0ebc839784e24f7c9684fde328ff063c3cdea", size = 844832, upload-time = "2026-02-28T02:16:41.875Z" }, + { url = "https://files.pythonhosted.org/packages/8a/a7/428a135cf5e15e4e11d1e696eb2bf968362f8ea8a5f237122e96bc2ae950/regex-2026.2.28-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:edb1b1b3a5576c56f08ac46f108c40333f222ebfd5cf63afdfa3aab0791ebe5b", size = 788347, upload-time = "2026-02-28T02:16:43.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/59/68691428851cf9c9c3707217ab1d9b47cfeec9d153a49919e6c368b9e926/regex-2026.2.28-cp311-cp311-win32.whl", hash = "sha256:948c12ef30ecedb128903c2c2678b339746eb7c689c5c21957c4a23950c96d15", size = 266033, upload-time = "2026-02-28T02:16:45.094Z" }, + { url = "https://files.pythonhosted.org/packages/42/8b/1483de1c57024e89296cbcceb9cccb3f625d416ddb46e570be185c9b05a9/regex-2026.2.28-cp311-cp311-win_amd64.whl", hash = "sha256:fd63453f10d29097cc3dc62d070746523973fb5aa1c66d25f8558bebd47fed61", size = 277978, upload-time = "2026-02-28T02:16:46.75Z" }, + { url = "https://files.pythonhosted.org/packages/a4/36/abec45dc6e7252e3dbc797120496e43bb5730a7abf0d9cb69340696a2f2d/regex-2026.2.28-cp311-cp311-win_arm64.whl", hash = "sha256:00f2b8d9615aa165fdff0a13f1a92049bfad555ee91e20d246a51aa0b556c60a", size = 270340, upload-time = "2026-02-28T02:16:48.626Z" }, + { url = "https://files.pythonhosted.org/packages/07/42/9061b03cf0fc4b5fa2c3984cbbaed54324377e440a5c5a29d29a72518d62/regex-2026.2.28-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fcf26c3c6d0da98fada8ae4ef0aa1c3405a431c0a77eb17306d38a89b02adcd7", size = 489574, upload-time = "2026-02-28T02:16:50.455Z" }, + { url = "https://files.pythonhosted.org/packages/77/83/0c8a5623a233015595e3da499c5a1c13720ac63c107897a6037bb97af248/regex-2026.2.28-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:02473c954af35dd2defeb07e44182f5705b30ea3f351a7cbffa9177beb14da5d", size = 291426, upload-time = "2026-02-28T02:16:52.52Z" }, + { url = "https://files.pythonhosted.org/packages/9e/06/3ef1ac6910dc3295ebd71b1f9bfa737e82cfead211a18b319d45f85ddd09/regex-2026.2.28-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9b65d33a17101569f86d9c5966a8b1d7fbf8afdda5a8aa219301b0a80f58cf7d", size = 289200, upload-time = "2026-02-28T02:16:54.08Z" }, + { url = "https://files.pythonhosted.org/packages/dd/c9/8cc8d850b35ab5650ff6756a1cb85286e2000b66c97520b29c1587455344/regex-2026.2.28-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e71dcecaa113eebcc96622c17692672c2d104b1d71ddf7adeda90da7ddeb26fc", size = 796765, upload-time = "2026-02-28T02:16:55.905Z" }, + { url = "https://files.pythonhosted.org/packages/e9/5d/57702597627fc23278ebf36fbb497ac91c0ce7fec89ac6c81e420ca3e38c/regex-2026.2.28-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:481df4623fa4969c8b11f3433ed7d5e3dc9cec0f008356c3212b3933fb77e3d8", size = 863093, upload-time = "2026-02-28T02:16:58.094Z" }, + { url = "https://files.pythonhosted.org/packages/02/6d/f3ecad537ca2811b4d26b54ca848cf70e04fcfc138667c146a9f3157779c/regex-2026.2.28-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:64e7c6ad614573e0640f271e811a408d79a9e1fe62a46adb602f598df42a818d", size = 909455, upload-time = "2026-02-28T02:17:00.918Z" }, + { url = "https://files.pythonhosted.org/packages/9e/40/bb226f203caa22c1043c1ca79b36340156eca0f6a6742b46c3bb222a3a57/regex-2026.2.28-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6b08a06976ff4fb0d83077022fde3eca06c55432bb997d8c0495b9a4e9872f4", size = 802037, upload-time = "2026-02-28T02:17:02.842Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/7c/c6d91d8911ac6803b45ca968e8e500c46934e58c0903cbc6d760ee817a0a/regex-2026.2.28-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:864cdd1a2ef5716b0ab468af40139e62ede1b3a53386b375ec0786bb6783fc05", size = 775113, upload-time = "2026-02-28T02:17:04.506Z" }, + { url = "https://files.pythonhosted.org/packages/dc/8d/4a9368d168d47abd4158580b8c848709667b1cd293ff0c0c277279543bd0/regex-2026.2.28-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:511f7419f7afab475fd4d639d4aedfc54205bcb0800066753ef68a59f0f330b5", size = 784194, upload-time = "2026-02-28T02:17:06.888Z" }, + { url = "https://files.pythonhosted.org/packages/cc/bf/2c72ab5d8b7be462cb1651b5cc333da1d0068740342f350fcca3bca31947/regex-2026.2.28-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b42f7466e32bf15a961cf09f35fa6323cc72e64d3d2c990b10de1274a5da0a59", size = 856846, upload-time = "2026-02-28T02:17:09.11Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f4/6b65c979bb6d09f51bb2d2a7bc85de73c01ec73335d7ddd202dcb8cd1c8f/regex-2026.2.28-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:8710d61737b0c0ce6836b1da7109f20d495e49b3809f30e27e9560be67a257bf", size = 763516, upload-time = "2026-02-28T02:17:11.004Z" }, + { url = "https://files.pythonhosted.org/packages/8e/32/29ea5e27400ee86d2cc2b4e80aa059df04eaf78b4f0c18576ae077aeff68/regex-2026.2.28-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4390c365fd2d45278f45afd4673cb90f7285f5701607e3ad4274df08e36140ae", size = 849278, upload-time = "2026-02-28T02:17:12.693Z" }, + { url = "https://files.pythonhosted.org/packages/1d/91/3233d03b5f865111cd517e1c95ee8b43e8b428d61fa73764a80c9bb6f537/regex-2026.2.28-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cb3b1db8ff6c7b8bf838ab05583ea15230cb2f678e569ab0e3a24d1e8320940b", size = 790068, upload-time = "2026-02-28T02:17:14.9Z" }, + { url = "https://files.pythonhosted.org/packages/76/92/abc706c1fb03b4580a09645b206a3fc032f5a9f457bc1a8038ac555658ab/regex-2026.2.28-cp312-cp312-win32.whl", hash = "sha256:f8ed9a5d4612df9d4de15878f0bc6aa7a268afbe5af21a3fdd97fa19516e978c", size = 266416, upload-time = "2026-02-28T02:17:17.15Z" }, + { url = "https://files.pythonhosted.org/packages/fa/06/2a6f7dff190e5fa9df9fb4acf2fdf17a1aa0f7f54596cba8de608db56b3a/regex-2026.2.28-cp312-cp312-win_amd64.whl", hash = "sha256:01d65fd24206c8e1e97e2e31b286c59009636c022eb5d003f52760b0f42155d4", size = 277297, upload-time = "2026-02-28T02:17:18.723Z" }, + { url = "https://files.pythonhosted.org/packages/b7/f0/58a2484851fadf284458fdbd728f580d55c1abac059ae9f048c63b92f427/regex-2026.2.28-cp312-cp312-win_arm64.whl", hash = "sha256:c0b5ccbb8ffb433939d248707d4a8b31993cb76ab1a0187ca886bf50e96df952", size = 270408, upload-time = "2026-02-28T02:17:20.328Z" }, + { url = "https://files.pythonhosted.org/packages/87/f6/dc9ef48c61b79c8201585bf37fa70cd781977da86e466cd94e8e95d2443b/regex-2026.2.28-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6d63a07e5ec8ce7184452cb00c41c37b49e67dc4f73b2955b5b8e782ea970784", size = 489311, upload-time = "2026-02-28T02:17:22.591Z" }, + { url = "https://files.pythonhosted.org/packages/95/c8/c20390f2232d3f7956f420f4ef1852608ad57aa26c3dd78516cb9f3dc913/regex-2026.2.28-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e59bc8f30414d283ae8ee1617b13d8112e7135cb92830f0ec3688cb29152585a", size = 291285, upload-time = "2026-02-28T02:17:24.355Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/a6/ba1068a631ebd71a230e7d8013fcd284b7c89c35f46f34a7da02082141b1/regex-2026.2.28-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:de0cf053139f96219ccfabb4a8dd2d217c8c82cb206c91d9f109f3f552d6b43d", size = 289051, upload-time = "2026-02-28T02:17:26.722Z" }, + { url = "https://files.pythonhosted.org/packages/1d/1b/7cc3b7af4c244c204b7a80924bd3d85aecd9ba5bc82b485c5806ee8cda9e/regex-2026.2.28-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fb4db2f17e6484904f986c5a657cec85574c76b5c5e61c7aae9ffa1bc6224f95", size = 796842, upload-time = "2026-02-28T02:17:29.064Z" }, + { url = "https://files.pythonhosted.org/packages/24/87/26bd03efc60e0d772ac1e7b60a2e6325af98d974e2358f659c507d3c76db/regex-2026.2.28-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:52b017b35ac2214d0db5f4f90e303634dc44e4aba4bd6235a27f97ecbe5b0472", size = 863083, upload-time = "2026-02-28T02:17:31.363Z" }, + { url = "https://files.pythonhosted.org/packages/ae/54/aeaf4afb1aa0a65e40de52a61dc2ac5b00a83c6cb081c8a1d0dda74f3010/regex-2026.2.28-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:69fc560ccbf08a09dc9b52ab69cacfae51e0ed80dc5693078bdc97db2f91ae96", size = 909412, upload-time = "2026-02-28T02:17:33.248Z" }, + { url = "https://files.pythonhosted.org/packages/12/2f/049901def913954e640d199bbc6a7ca2902b6aeda0e5da9d17f114100ec2/regex-2026.2.28-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e61eea47230eba62a31f3e8a0e3164d0f37ef9f40529fb2c79361bc6b53d2a92", size = 802101, upload-time = "2026-02-28T02:17:35.053Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/512fb9ff7f5b15ea204bb1967ebb649059446decacccb201381f9fa6aad4/regex-2026.2.28-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4f5c0b182ad4269e7381b7c27fdb0408399881f7a92a4624fd5487f2971dfc11", size = 775260, upload-time = "2026-02-28T02:17:37.692Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/9a92935878aba19bd72706b9db5646a6f993d99b3f6ed42c02ec8beb1d61/regex-2026.2.28-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:96f6269a2882fbb0ee76967116b83679dc628e68eaea44e90884b8d53d833881", size = 784311, upload-time = "2026-02-28T02:17:39.855Z" }, + { url = "https://files.pythonhosted.org/packages/09/d3/fc51a8a738a49a6b6499626580554c9466d3ea561f2b72cfdc72e4149773/regex-2026.2.28-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b5acd4b6a95f37c3c3828e5d053a7d4edaedb85de551db0153754924cb7c83e3", size = 856876, upload-time = "2026-02-28T02:17:42.317Z" }, + { url = "https://files.pythonhosted.org/packages/08/b7/2e641f3d084b120ca4c52e8c762a78da0b32bf03ef546330db3e2635dc5f/regex-2026.2.28-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2234059cfe33d9813a3677ef7667999caea9eeaa83fef98eb6ce15c6cf9e0215", size = 763632, upload-time = "2026-02-28T02:17:45.073Z" }, + { url = "https://files.pythonhosted.org/packages/fe/6d/0009021d97e79ee99f3d8641f0a8d001eed23479ade4c3125a5480bf3e2d/regex-2026.2.28-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:c15af43c72a7fb0c97cbc66fa36a43546eddc5c06a662b64a0cbf30d6ac40944", size = 849320, upload-time = "2026-02-28T02:17:47.192Z" }, + { url = "https://files.pythonhosted.org/packages/05/7a/51cfbad5758f8edae430cb21961a9c8d04bce1dae4d2d18d4186eec7cfa1/regex-2026.2.28-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:9185cc63359862a6e80fe97f696e04b0ad9a11c4ac0a4a927f979f611bfe3768", size = 790152, upload-time = "2026-02-28T02:17:49.067Z" }, + { url = "https://files.pythonhosted.org/packages/90/3d/a83e2b6b3daa142acb8c41d51de3876186307d5cb7490087031747662500/regex-2026.2.28-cp313-cp313-win32.whl", hash = "sha256:fb66e5245db9652abd7196ace599b04d9c0e4aa7c8f0e2803938377835780081", size = 266398, upload-time = "2026-02-28T02:17:50.744Z" }, + { url = "https://files.pythonhosted.org/packages/85/4f/16e9ebb1fe5425e11b9596c8d57bf8877dcb32391da0bfd33742e3290637/regex-2026.2.28-cp313-cp313-win_amd64.whl", hash = "sha256:71a911098be38c859ceb3f9a9ce43f4ed9f4c6720ad8684a066ea246b76ad9ff", size = 277282, upload-time = "2026-02-28T02:17:53.074Z" }, + { url = "https://files.pythonhosted.org/packages/07/b4/92851335332810c5a89723bf7a7e35c7209f90b7d4160024501717b28cc9/regex-2026.2.28-cp313-cp313-win_arm64.whl", hash = "sha256:39bb5727650b9a0275c6a6690f9bb3fe693a7e6cc5c3155b1240aedf8926423e", size = 270382, upload-time = "2026-02-28T02:17:54.888Z" }, + { url = "https://files.pythonhosted.org/packages/24/07/6c7e4cec1e585959e96cbc24299d97e4437a81173217af54f1804994e911/regex-2026.2.28-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:97054c55db06ab020342cc0d35d6f62a465fa7662871190175f1ad6c655c028f", size = 492541, upload-time = "2026-02-28T02:17:56.813Z" }, + { url = "https://files.pythonhosted.org/packages/7c/13/55eb22ada7f43d4f4bb3815b6132183ebc331c81bd496e2d1f3b8d862e0d/regex-2026.2.28-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0d25a10811de831c2baa6aef3c0be91622f44dd8d31dd12e69f6398efb15e48b", size = 292984, upload-time = "2026-02-28T02:17:58.538Z" }, + { url = "https://files.pythonhosted.org/packages/5b/11/c301f8cb29ce9644a5ef85104c59244e6e7e90994a0f458da4d39baa8e17/regex-2026.2.28-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d6cfe798d8da41bb1862ed6e0cba14003d387c3c0c4a5d45591076ae9f0ce2f8", size = 291509, upload-time = "2026-02-28T02:18:00.208Z" }, + { url = "https://files.pythonhosted.org/packages/b5/43/aabe384ec1994b91796e903582427bc2ffaed9c4103819ed3c16d8e749f3/regex-2026.2.28-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fd0ce43e71d825b7c0661f9c54d4d74bd97c56c3fd102a8985bcfea48236bacb", size = 809429, upload-time = "2026-02-28T02:18:02.328Z" }, + { url = "https://files.pythonhosted.org/packages/04/b8/8d2d987a816720c4f3109cee7c06a4b24ad0e02d4fc74919ab619e543737/regex-2026.2.28-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00945d007fd74a9084d2ab79b695b595c6b7ba3698972fadd43e23230c6979c1", size = 869422, upload-time = "2026-02-28T02:18:04.23Z" }, + { url = "https://files.pythonhosted.org/packages/fc/ad/2c004509e763c0c3719f97c03eca26473bffb3868d54c5f280b8cd4f9e3d/regex-2026.2.28-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bec23c11cbbf09a4df32fe50d57cbdd777bc442269b6e39a1775654f1c95dee2", size = 915175, upload-time = "2026-02-28T02:18:06.791Z" }, + { url = "https://files.pythonhosted.org/packages/55/c2/fd429066da487ef555a9da73bf214894aec77fc8c66a261ee355a69871a8/regex-2026.2.28-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5cdcc17d935c8f9d3f4db5c2ebe2640c332e3822ad5d23c2f8e0228e6947943a", size = 812044, upload-time = "2026-02-28T02:18:08.736Z" }, + { url = 
"https://files.pythonhosted.org/packages/5b/ca/feedb7055c62a3f7f659971bf45f0e0a87544b6b0cf462884761453f97c5/regex-2026.2.28-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a448af01e3d8031c89c5d902040b124a5e921a25c4e5e07a861ca591ce429341", size = 782056, upload-time = "2026-02-28T02:18:10.777Z" }, + { url = "https://files.pythonhosted.org/packages/95/30/1aa959ed0d25c1dd7dd5047ea8ba482ceaef38ce363c401fd32a6b923e60/regex-2026.2.28-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:10d28e19bd4888e4abf43bd3925f3c134c52fdf7259219003588a42e24c2aa25", size = 798743, upload-time = "2026-02-28T02:18:13.025Z" }, + { url = "https://files.pythonhosted.org/packages/3b/1f/dadb9cf359004784051c897dcf4d5d79895f73a1bbb7b827abaa4814ae80/regex-2026.2.28-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:99985a2c277dcb9ccb63f937451af5d65177af1efdeb8173ac55b61095a0a05c", size = 864633, upload-time = "2026-02-28T02:18:16.84Z" }, + { url = "https://files.pythonhosted.org/packages/a7/f1/b9a25eb24e1cf79890f09e6ec971ee5b511519f1851de3453bc04f6c902b/regex-2026.2.28-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:e1e7b24cb3ae9953a560c563045d1ba56ee4749fbd05cf21ba571069bd7be81b", size = 770862, upload-time = "2026-02-28T02:18:18.892Z" }, + { url = "https://files.pythonhosted.org/packages/02/9a/c5cb10b7aa6f182f9247a30cc9527e326601f46f4df864ac6db588d11fcd/regex-2026.2.28-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:d8511a01d0e4ee1992eb3ba19e09bc1866fe03f05129c3aec3fdc4cbc77aad3f", size = 854788, upload-time = "2026-02-28T02:18:21.475Z" }, + { url = "https://files.pythonhosted.org/packages/0a/50/414ba0731c4bd40b011fa4703b2cc86879ec060c64f2a906e65a56452589/regex-2026.2.28-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:aaffaecffcd2479ce87aa1e74076c221700b7c804e48e98e62500ee748f0f550", size = 800184, upload-time = "2026-02-28T02:18:23.492Z" }, + { url = "https://files.pythonhosted.org/packages/69/50/0c7290987f97e7e6830b0d853f69dc4dc5852c934aae63e7fdcd76b4c383/regex-2026.2.28-cp313-cp313t-win32.whl", hash = "sha256:ef77bdde9c9eba3f7fa5b58084b29bbcc74bcf55fdbeaa67c102a35b5bd7e7cc", size = 269137, upload-time = "2026-02-28T02:18:25.375Z" }, + { url = "https://files.pythonhosted.org/packages/68/80/ef26ff90e74ceb4051ad6efcbbb8a4be965184a57e879ebcbdef327d18fa/regex-2026.2.28-cp313-cp313t-win_amd64.whl", hash = "sha256:98adf340100cbe6fbaf8e6dc75e28f2c191b1be50ffefe292fb0e6f6eefdb0d8", size = 280682, upload-time = "2026-02-28T02:18:27.205Z" }, + { url = "https://files.pythonhosted.org/packages/69/8b/fbad9c52e83ffe8f97e3ed1aa0516e6dff6bb633a41da9e64645bc7efdc5/regex-2026.2.28-cp313-cp313t-win_arm64.whl", hash = "sha256:2fb950ac1d88e6b6a9414381f403797b236f9fa17e1eee07683af72b1634207b", size = 271735, upload-time = "2026-02-28T02:18:29.015Z" }, + { url = "https://files.pythonhosted.org/packages/cf/03/691015f7a7cb1ed6dacb2ea5de5682e4858e05a4c5506b2839cd533bbcd6/regex-2026.2.28-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:78454178c7df31372ea737996fb7f36b3c2c92cccc641d251e072478afb4babc", size = 489497, upload-time = "2026-02-28T02:18:30.889Z" }, + { url = "https://files.pythonhosted.org/packages/c6/ba/8db8fd19afcbfa0e1036eaa70c05f20ca8405817d4ad7a38a6b4c2f031ac/regex-2026.2.28-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:5d10303dd18cedfd4d095543998404df656088240bcfd3cd20a8f95b861f74bd", size = 291295, upload-time = "2026-02-28T02:18:33.426Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/79/9aa0caf089e8defef9b857b52fc53801f62ff868e19e5c83d4a96612eba1/regex-2026.2.28-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:19a9c9e0a8f24f39d575a6a854d516b48ffe4cbdcb9de55cb0570a032556ecff", size = 289275, upload-time = "2026-02-28T02:18:35.247Z" }, + { url = "https://files.pythonhosted.org/packages/eb/26/ee53117066a30ef9c883bf1127eece08308ccf8ccd45c45a966e7a665385/regex-2026.2.28-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09500be324f49b470d907b3ef8af9afe857f5cca486f853853f7945ddbf75911", size = 797176, upload-time = "2026-02-28T02:18:37.15Z" }, + { url = "https://files.pythonhosted.org/packages/05/1b/67fb0495a97259925f343ae78b5d24d4a6624356ae138b57f18bd43006e4/regex-2026.2.28-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fb1c4ff62277d87a7335f2c1ea4e0387b8f2b3ad88a64efd9943906aafad4f33", size = 863813, upload-time = "2026-02-28T02:18:39.478Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/93ac9bbafc53618091c685c7ed40239a90bf9f2a82c983f0baa97cb7ae07/regex-2026.2.28-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b8b3f1be1738feadc69f62daa250c933e85c6f34fa378f54a7ff43807c1b9117", size = 908678, upload-time = "2026-02-28T02:18:41.619Z" }, + { url = "https://files.pythonhosted.org/packages/c7/7a/a8f5e0561702b25239846a16349feece59712ae20598ebb205580332a471/regex-2026.2.28-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc8ed8c3f41c27acb83f7b6a9eb727a73fc6663441890c5cb3426a5f6a91ce7d", size = 801528, upload-time = "2026-02-28T02:18:43.624Z" }, + { url = "https://files.pythonhosted.org/packages/96/5d/ed6d4cbde80309854b1b9f42d9062fee38ade15f7eb4909f6ef2440403b5/regex-2026.2.28-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa539be029844c0ce1114762d2952ab6cfdd7c7c9bd72e0db26b94c3c36dcc5a", size = 775373, upload-time = "2026-02-28T02:18:46.102Z" }, + { url = "https://files.pythonhosted.org/packages/6a/e9/6e53c34e8068b9deec3e87210086ecb5b9efebdefca6b0d3fa43d66dcecb/regex-2026.2.28-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7900157786428a79615a8264dac1f12c9b02957c473c8110c6b1f972dcecaddf", size = 784859, upload-time = "2026-02-28T02:18:48.269Z" }, + { url = "https://files.pythonhosted.org/packages/48/3c/736e1c7ca7f0dcd2ae33819888fdc69058a349b7e5e84bc3e2f296bbf794/regex-2026.2.28-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:0b1d2b07614d95fa2bf8a63fd1e98bd8fa2b4848dc91b1efbc8ba219fdd73952", size = 857813, upload-time = "2026-02-28T02:18:50.576Z" }, + { url = "https://files.pythonhosted.org/packages/6e/7c/48c4659ad9da61f58e79dbe8c05223e0006696b603c16eb6b5cbfbb52c27/regex-2026.2.28-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:b389c61aa28a79c2e0527ac36da579869c2e235a5b208a12c5b5318cda2501d8", size = 763705, upload-time = "2026-02-28T02:18:52.59Z" }, + { url = "https://files.pythonhosted.org/packages/cf/a1/bc1c261789283128165f71b71b4b221dd1b79c77023752a6074c102f18d8/regex-2026.2.28-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f467cb602f03fbd1ab1908f68b53c649ce393fde056628dc8c7e634dab6bfc07", size = 848734, upload-time = "2026-02-28T02:18:54.595Z" }, + { url = "https://files.pythonhosted.org/packages/10/d8/979407faf1397036e25a5ae778157366a911c0f382c62501009f4957cf86/regex-2026.2.28-cp314-cp314-musllinux_1_2_x86_64.whl", hash = 
"sha256:e8c8cb2deba42f5ec1ede46374e990f8adc5e6456a57ac1a261b19be6f28e4e6", size = 789871, upload-time = "2026-02-28T02:18:57.34Z" }, + { url = "https://files.pythonhosted.org/packages/03/23/da716821277115fcb1f4e3de1e5dc5023a1e6533598c486abf5448612579/regex-2026.2.28-cp314-cp314-win32.whl", hash = "sha256:9036b400b20e4858d56d117108d7813ed07bb7803e3eed766675862131135ca6", size = 271825, upload-time = "2026-02-28T02:18:59.202Z" }, + { url = "https://files.pythonhosted.org/packages/91/ff/90696f535d978d5f16a52a419be2770a8d8a0e7e0cfecdbfc31313df7fab/regex-2026.2.28-cp314-cp314-win_amd64.whl", hash = "sha256:1d367257cd86c1cbb97ea94e77b373a0bbc2224976e247f173d19e8f18b4afa7", size = 280548, upload-time = "2026-02-28T02:19:01.049Z" }, + { url = "https://files.pythonhosted.org/packages/69/f9/5e1b5652fc0af3fcdf7677e7df3ad2a0d47d669b34ac29a63bb177bb731b/regex-2026.2.28-cp314-cp314-win_arm64.whl", hash = "sha256:5e68192bb3a1d6fb2836da24aa494e413ea65853a21505e142e5b1064a595f3d", size = 273444, upload-time = "2026-02-28T02:19:03.255Z" }, + { url = "https://files.pythonhosted.org/packages/d3/eb/8389f9e940ac89bcf58d185e230a677b4fd07c5f9b917603ad5c0f8fa8fe/regex-2026.2.28-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:a5dac14d0872eeb35260a8e30bac07ddf22adc1e3a0635b52b02e180d17c9c7e", size = 492546, upload-time = "2026-02-28T02:19:05.378Z" }, + { url = "https://files.pythonhosted.org/packages/7b/c7/09441d27ce2a6fa6a61ea3150ea4639c1dcda9b31b2ea07b80d6937b24dd/regex-2026.2.28-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:ec0c608b7a7465ffadb344ed7c987ff2f11ee03f6a130b569aa74d8a70e8333c", size = 292986, upload-time = "2026-02-28T02:19:07.24Z" }, + { url = "https://files.pythonhosted.org/packages/fb/69/4144b60ed7760a6bd235e4087041f487aa4aa62b45618ce018b0c14833ea/regex-2026.2.28-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c7815afb0ca45456613fdaf60ea9c993715511c8d53a83bc468305cbc0ee23c7", size = 291518, upload-time = "2026-02-28T02:19:09.698Z" }, + { url = "https://files.pythonhosted.org/packages/2d/be/77e5426cf5948c82f98c53582009ca9e94938c71f73a8918474f2e2990bb/regex-2026.2.28-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b059e71ec363968671693a78c5053bd9cb2fe410f9b8e4657e88377ebd603a2e", size = 809464, upload-time = "2026-02-28T02:19:12.494Z" }, + { url = "https://files.pythonhosted.org/packages/45/99/2c8c5ac90dc7d05c6e7d8e72c6a3599dc08cd577ac476898e91ca787d7f1/regex-2026.2.28-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b8cf76f1a29f0e99dcfd7aef1551a9827588aae5a737fe31442021165f1920dc", size = 869553, upload-time = "2026-02-28T02:19:15.151Z" }, + { url = "https://files.pythonhosted.org/packages/53/34/daa66a342f0271e7737003abf6c3097aa0498d58c668dbd88362ef94eb5d/regex-2026.2.28-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:180e08a435a0319e6a4821c3468da18dc7001987e1c17ae1335488dfe7518dd8", size = 915289, upload-time = "2026-02-28T02:19:17.331Z" }, + { url = "https://files.pythonhosted.org/packages/c5/c7/e22c2aaf0a12e7e22ab19b004bb78d32ca1ecc7ef245949935463c5567de/regex-2026.2.28-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1e496956106fd59ba6322a8ea17141a27c5040e5ee8f9433ae92d4e5204462a0", size = 812156, upload-time = "2026-02-28T02:19:20.011Z" }, + { url = 
"https://files.pythonhosted.org/packages/7f/bb/2dc18c1efd9051cf389cd0d7a3a4d90f6804b9fff3a51b5dc3c85b935f71/regex-2026.2.28-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bba2b18d70eeb7b79950f12f633beeecd923f7c9ad6f6bae28e59b4cb3ab046b", size = 782215, upload-time = "2026-02-28T02:19:22.047Z" }, + { url = "https://files.pythonhosted.org/packages/17/1e/9e4ec9b9013931faa32226ec4aa3c71fe664a6d8a2b91ac56442128b332f/regex-2026.2.28-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:6db7bfae0f8a2793ff1f7021468ea55e2699d0790eb58ee6ab36ae43aa00bc5b", size = 798925, upload-time = "2026-02-28T02:19:24.173Z" }, + { url = "https://files.pythonhosted.org/packages/71/57/a505927e449a9ccb41e2cc8d735e2abe3444b0213d1cf9cb364a8c1f2524/regex-2026.2.28-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:d0b02e8b7e5874b48ae0f077ecca61c1a6a9f9895e9c6dfb191b55b242862033", size = 864701, upload-time = "2026-02-28T02:19:26.376Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ad/c62cb60cdd93e13eac5b3d9d6bd5d284225ed0e3329426f94d2552dd7cca/regex-2026.2.28-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:25b6eb660c5cf4b8c3407a1ed462abba26a926cc9965e164268a3267bcc06a43", size = 770899, upload-time = "2026-02-28T02:19:29.38Z" }, + { url = "https://files.pythonhosted.org/packages/3c/5a/874f861f5c3d5ab99633e8030dee1bc113db8e0be299d1f4b07f5b5ec349/regex-2026.2.28-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:5a932ea8ad5d0430351ff9c76c8db34db0d9f53c1d78f06022a21f4e290c5c18", size = 854727, upload-time = "2026-02-28T02:19:31.494Z" }, + { url = "https://files.pythonhosted.org/packages/6b/ca/d2c03b0efde47e13db895b975b2be6a73ed90b8ba963677927283d43bf74/regex-2026.2.28-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:1c2c95e1a2b0f89d01e821ff4de1be4b5d73d1f4b0bf679fa27c1ad8d2327f1a", size = 800366, upload-time = "2026-02-28T02:19:34.248Z" }, + { url = "https://files.pythonhosted.org/packages/14/bd/ee13b20b763b8989f7c75d592bfd5de37dc1181814a2a2747fedcf97e3ba/regex-2026.2.28-cp314-cp314t-win32.whl", hash = "sha256:bbb882061f742eb5d46f2f1bd5304055be0a66b783576de3d7eef1bed4778a6e", size = 274936, upload-time = "2026-02-28T02:19:36.313Z" }, + { url = "https://files.pythonhosted.org/packages/cb/e7/d8020e39414c93af7f0d8688eabcecece44abfd5ce314b21dfda0eebd3d8/regex-2026.2.28-cp314-cp314t-win_amd64.whl", hash = "sha256:6591f281cb44dc13de9585b552cec6fc6cf47fb2fe7a48892295ee9bc4a612f9", size = 284779, upload-time = "2026-02-28T02:19:38.625Z" }, + { url = "https://files.pythonhosted.org/packages/13/c0/ad225f4a405827486f1955283407cf758b6d2fb966712644c5f5aef33d1b/regex-2026.2.28-cp314-cp314t-win_arm64.whl", hash = "sha256:dee50f1be42222f89767b64b283283ef963189da0dda4a515aa54a5563c62dec", size = 275010, upload-time = "2026-02-28T02:19:40.65Z" }, +] + [[package]] name = "requests" version = "2.32.5" @@ -1050,6 +1156,60 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, ] +[[package]] +name = "tiktoken" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "regex" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/ab/4d017d0f76ec3171d469d80fc03dfbb4e48a4bcaddaa831b31d526f05edc/tiktoken-0.12.0.tar.gz", hash = 
"sha256:b18ba7ee2b093863978fcb14f74b3707cdc8d4d4d3836853ce7ec60772139931", size = 37806, upload-time = "2025-10-06T20:22:45.419Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/46/21ea696b21f1d6d1efec8639c204bdf20fde8bafb351e1355c72c5d7de52/tiktoken-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e227c7f96925003487c33b1b32265fad2fbcec2b7cf4817afb76d416f40f6bb", size = 1051565, upload-time = "2025-10-06T20:21:44.566Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d9/35c5d2d9e22bb2a5f74ba48266fb56c63d76ae6f66e02feb628671c0283e/tiktoken-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c06cf0fcc24c2cb2adb5e185c7082a82cba29c17575e828518c2f11a01f445aa", size = 995284, upload-time = "2025-10-06T20:21:45.622Z" }, + { url = "https://files.pythonhosted.org/packages/01/84/961106c37b8e49b9fdcf33fe007bb3a8fdcc380c528b20cc7fbba80578b8/tiktoken-0.12.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:f18f249b041851954217e9fd8e5c00b024ab2315ffda5ed77665a05fa91f42dc", size = 1129201, upload-time = "2025-10-06T20:21:47.074Z" }, + { url = "https://files.pythonhosted.org/packages/6a/d0/3d9275198e067f8b65076a68894bb52fd253875f3644f0a321a720277b8a/tiktoken-0.12.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:47a5bc270b8c3db00bb46ece01ef34ad050e364b51d406b6f9730b64ac28eded", size = 1152444, upload-time = "2025-10-06T20:21:48.139Z" }, + { url = "https://files.pythonhosted.org/packages/78/db/a58e09687c1698a7c592e1038e01c206569b86a0377828d51635561f8ebf/tiktoken-0.12.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:508fa71810c0efdcd1b898fda574889ee62852989f7c1667414736bcb2b9a4bd", size = 1195080, upload-time = "2025-10-06T20:21:49.246Z" }, + { url = "https://files.pythonhosted.org/packages/9e/1b/a9e4d2bf91d515c0f74afc526fd773a812232dd6cda33ebea7f531202325/tiktoken-0.12.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a1af81a6c44f008cba48494089dd98cccb8b313f55e961a52f5b222d1e507967", size = 1255240, upload-time = "2025-10-06T20:21:50.274Z" }, + { url = "https://files.pythonhosted.org/packages/9d/15/963819345f1b1fb0809070a79e9dd96938d4ca41297367d471733e79c76c/tiktoken-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:3e68e3e593637b53e56f7237be560f7a394451cb8c11079755e80ae64b9e6def", size = 879422, upload-time = "2025-10-06T20:21:51.734Z" }, + { url = "https://files.pythonhosted.org/packages/a4/85/be65d39d6b647c79800fd9d29241d081d4eeb06271f383bb87200d74cf76/tiktoken-0.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b97f74aca0d78a1ff21b8cd9e9925714c15a9236d6ceacf5c7327c117e6e21e8", size = 1050728, upload-time = "2025-10-06T20:21:52.756Z" }, + { url = "https://files.pythonhosted.org/packages/4a/42/6573e9129bc55c9bf7300b3a35bef2c6b9117018acca0dc760ac2d93dffe/tiktoken-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2b90f5ad190a4bb7c3eb30c5fa32e1e182ca1ca79f05e49b448438c3e225a49b", size = 994049, upload-time = "2025-10-06T20:21:53.782Z" }, + { url = "https://files.pythonhosted.org/packages/66/c5/ed88504d2f4a5fd6856990b230b56d85a777feab84e6129af0822f5d0f70/tiktoken-0.12.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:65b26c7a780e2139e73acc193e5c63ac754021f160df919add909c1492c0fb37", size = 1129008, upload-time = "2025-10-06T20:21:54.832Z" }, + { url = "https://files.pythonhosted.org/packages/f4/90/3dae6cc5436137ebd38944d396b5849e167896fc2073da643a49f372dc4f/tiktoken-0.12.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:edde1ec917dfd21c1f2f8046b86348b0f54a2c0547f68149d8600859598769ad", size = 1152665, 
upload-time = "2025-10-06T20:21:56.129Z" }, + { url = "https://files.pythonhosted.org/packages/a3/fe/26df24ce53ffde419a42f5f53d755b995c9318908288c17ec3f3448313a3/tiktoken-0.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:35a2f8ddd3824608b3d650a000c1ef71f730d0c56486845705a8248da00f9fe5", size = 1194230, upload-time = "2025-10-06T20:21:57.546Z" }, + { url = "https://files.pythonhosted.org/packages/20/cc/b064cae1a0e9fac84b0d2c46b89f4e57051a5f41324e385d10225a984c24/tiktoken-0.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83d16643edb7fa2c99eff2ab7733508aae1eebb03d5dfc46f5565862810f24e3", size = 1254688, upload-time = "2025-10-06T20:21:58.619Z" }, + { url = "https://files.pythonhosted.org/packages/81/10/b8523105c590c5b8349f2587e2fdfe51a69544bd5a76295fc20f2374f470/tiktoken-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ffc5288f34a8bc02e1ea7047b8d041104791d2ddbf42d1e5fa07822cbffe16bd", size = 878694, upload-time = "2025-10-06T20:21:59.876Z" }, + { url = "https://files.pythonhosted.org/packages/00/61/441588ee21e6b5cdf59d6870f86beb9789e532ee9718c251b391b70c68d6/tiktoken-0.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:775c2c55de2310cc1bc9a3ad8826761cbdc87770e586fd7b6da7d4589e13dab3", size = 1050802, upload-time = "2025-10-06T20:22:00.96Z" }, + { url = "https://files.pythonhosted.org/packages/1f/05/dcf94486d5c5c8d34496abe271ac76c5b785507c8eae71b3708f1ad9b45a/tiktoken-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a01b12f69052fbe4b080a2cfb867c4de12c704b56178edf1d1d7b273561db160", size = 993995, upload-time = "2025-10-06T20:22:02.788Z" }, + { url = "https://files.pythonhosted.org/packages/a0/70/5163fe5359b943f8db9946b62f19be2305de8c3d78a16f629d4165e2f40e/tiktoken-0.12.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:01d99484dc93b129cd0964f9d34eee953f2737301f18b3c7257bf368d7615baa", size = 1128948, upload-time = "2025-10-06T20:22:03.814Z" }, + { url = "https://files.pythonhosted.org/packages/0c/da/c028aa0babf77315e1cef357d4d768800c5f8a6de04d0eac0f377cb619fa/tiktoken-0.12.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:4a1a4fcd021f022bfc81904a911d3df0f6543b9e7627b51411da75ff2fe7a1be", size = 1151986, upload-time = "2025-10-06T20:22:05.173Z" }, + { url = "https://files.pythonhosted.org/packages/a0/5a/886b108b766aa53e295f7216b509be95eb7d60b166049ce2c58416b25f2a/tiktoken-0.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:981a81e39812d57031efdc9ec59fa32b2a5a5524d20d4776574c4b4bd2e9014a", size = 1194222, upload-time = "2025-10-06T20:22:06.265Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f8/4db272048397636ac7a078d22773dd2795b1becee7bc4922fe6207288d57/tiktoken-0.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9baf52f84a3f42eef3ff4e754a0db79a13a27921b457ca9832cf944c6be4f8f3", size = 1255097, upload-time = "2025-10-06T20:22:07.403Z" }, + { url = "https://files.pythonhosted.org/packages/8e/32/45d02e2e0ea2be3a9ed22afc47d93741247e75018aac967b713b2941f8ea/tiktoken-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:b8a0cd0c789a61f31bf44851defbd609e8dd1e2c8589c614cc1060940ef1f697", size = 879117, upload-time = "2025-10-06T20:22:08.418Z" }, + { url = "https://files.pythonhosted.org/packages/ce/76/994fc868f88e016e6d05b0da5ac24582a14c47893f4474c3e9744283f1d5/tiktoken-0.12.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d5f89ea5680066b68bcb797ae85219c72916c922ef0fcdd3480c7d2315ffff16", size = 1050309, upload-time = "2025-10-06T20:22:10.939Z" }, + { url = 
"https://files.pythonhosted.org/packages/f6/b8/57ef1456504c43a849821920d582a738a461b76a047f352f18c0b26c6516/tiktoken-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b4e7ed1c6a7a8a60a3230965bdedba8cc58f68926b835e519341413370e0399a", size = 993712, upload-time = "2025-10-06T20:22:12.115Z" }, + { url = "https://files.pythonhosted.org/packages/72/90/13da56f664286ffbae9dbcfadcc625439142675845baa62715e49b87b68b/tiktoken-0.12.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:fc530a28591a2d74bce821d10b418b26a094bf33839e69042a6e86ddb7a7fb27", size = 1128725, upload-time = "2025-10-06T20:22:13.541Z" }, + { url = "https://files.pythonhosted.org/packages/05/df/4f80030d44682235bdaecd7346c90f67ae87ec8f3df4a3442cb53834f7e4/tiktoken-0.12.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:06a9f4f49884139013b138920a4c393aa6556b2f8f536345f11819389c703ebb", size = 1151875, upload-time = "2025-10-06T20:22:14.559Z" }, + { url = "https://files.pythonhosted.org/packages/22/1f/ae535223a8c4ef4c0c1192e3f9b82da660be9eb66b9279e95c99288e9dab/tiktoken-0.12.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:04f0e6a985d95913cabc96a741c5ffec525a2c72e9df086ff17ebe35985c800e", size = 1194451, upload-time = "2025-10-06T20:22:15.545Z" }, + { url = "https://files.pythonhosted.org/packages/78/a7/f8ead382fce0243cb625c4f266e66c27f65ae65ee9e77f59ea1653b6d730/tiktoken-0.12.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0ee8f9ae00c41770b5f9b0bb1235474768884ae157de3beb5439ca0fd70f3e25", size = 1253794, upload-time = "2025-10-06T20:22:16.624Z" }, + { url = "https://files.pythonhosted.org/packages/93/e0/6cc82a562bc6365785a3ff0af27a2a092d57c47d7a81d9e2295d8c36f011/tiktoken-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:dc2dd125a62cb2b3d858484d6c614d136b5b848976794edfb63688d539b8b93f", size = 878777, upload-time = "2025-10-06T20:22:18.036Z" }, + { url = "https://files.pythonhosted.org/packages/72/05/3abc1db5d2c9aadc4d2c76fa5640134e475e58d9fbb82b5c535dc0de9b01/tiktoken-0.12.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a90388128df3b3abeb2bfd1895b0681412a8d7dc644142519e6f0a97c2111646", size = 1050188, upload-time = "2025-10-06T20:22:19.563Z" }, + { url = "https://files.pythonhosted.org/packages/e3/7b/50c2f060412202d6c95f32b20755c7a6273543b125c0985d6fa9465105af/tiktoken-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:da900aa0ad52247d8794e307d6446bd3cdea8e192769b56276695d34d2c9aa88", size = 993978, upload-time = "2025-10-06T20:22:20.702Z" }, + { url = "https://files.pythonhosted.org/packages/14/27/bf795595a2b897e271771cd31cb847d479073497344c637966bdf2853da1/tiktoken-0.12.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:285ba9d73ea0d6171e7f9407039a290ca77efcdb026be7769dccc01d2c8d7fff", size = 1129271, upload-time = "2025-10-06T20:22:22.06Z" }, + { url = "https://files.pythonhosted.org/packages/f5/de/9341a6d7a8f1b448573bbf3425fa57669ac58258a667eb48a25dfe916d70/tiktoken-0.12.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:d186a5c60c6a0213f04a7a802264083dea1bbde92a2d4c7069e1a56630aef830", size = 1151216, upload-time = "2025-10-06T20:22:23.085Z" }, + { url = "https://files.pythonhosted.org/packages/75/0d/881866647b8d1be4d67cb24e50d0c26f9f807f994aa1510cb9ba2fe5f612/tiktoken-0.12.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:604831189bd05480f2b885ecd2d1986dc7686f609de48208ebbbddeea071fc0b", size = 1194860, upload-time = "2025-10-06T20:22:24.602Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/1e/b651ec3059474dab649b8d5b69f5c65cd8fcd8918568c1935bd4136c9392/tiktoken-0.12.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8f317e8530bb3a222547b85a58583238c8f74fd7a7408305f9f63246d1a0958b", size = 1254567, upload-time = "2025-10-06T20:22:25.671Z" }, + { url = "https://files.pythonhosted.org/packages/80/57/ce64fd16ac390fafde001268c364d559447ba09b509181b2808622420eec/tiktoken-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:399c3dd672a6406719d84442299a490420b458c44d3ae65516302a99675888f3", size = 921067, upload-time = "2025-10-06T20:22:26.753Z" }, + { url = "https://files.pythonhosted.org/packages/ac/a4/72eed53e8976a099539cdd5eb36f241987212c29629d0a52c305173e0a68/tiktoken-0.12.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:c2c714c72bc00a38ca969dae79e8266ddec999c7ceccd603cc4f0d04ccd76365", size = 1050473, upload-time = "2025-10-06T20:22:27.775Z" }, + { url = "https://files.pythonhosted.org/packages/e6/d7/0110b8f54c008466b19672c615f2168896b83706a6611ba6e47313dbc6e9/tiktoken-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:cbb9a3ba275165a2cb0f9a83f5d7025afe6b9d0ab01a22b50f0e74fee2ad253e", size = 993855, upload-time = "2025-10-06T20:22:28.799Z" }, + { url = "https://files.pythonhosted.org/packages/5f/77/4f268c41a3957c418b084dd576ea2fad2e95da0d8e1ab705372892c2ca22/tiktoken-0.12.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:dfdfaa5ffff8993a3af94d1125870b1d27aed7cb97aa7eb8c1cefdbc87dbee63", size = 1129022, upload-time = "2025-10-06T20:22:29.981Z" }, + { url = "https://files.pythonhosted.org/packages/4e/2b/fc46c90fe5028bd094cd6ee25a7db321cb91d45dc87531e2bdbb26b4867a/tiktoken-0.12.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:584c3ad3d0c74f5269906eb8a659c8bfc6144a52895d9261cdaf90a0ae5f4de0", size = 1150736, upload-time = "2025-10-06T20:22:30.996Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/3c7a39ff68022ddfd7d93f3337ad90389a342f761c4d71de99a3ccc57857/tiktoken-0.12.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:54c891b416a0e36b8e2045b12b33dd66fb34a4fe7965565f1b482da50da3e86a", size = 1194908, upload-time = "2025-10-06T20:22:32.073Z" }, + { url = "https://files.pythonhosted.org/packages/ab/0d/c1ad6f4016a3968c048545f5d9b8ffebf577774b2ede3e2e352553b685fe/tiktoken-0.12.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5edb8743b88d5be814b1a8a8854494719080c28faaa1ccbef02e87354fe71ef0", size = 1253706, upload-time = "2025-10-06T20:22:33.385Z" }, + { url = "https://files.pythonhosted.org/packages/af/df/c7891ef9d2712ad774777271d39fdef63941ffba0a9d59b7ad1fd2765e57/tiktoken-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:f61c0aea5565ac82e2ec50a05e02a6c44734e91b51c10510b084ea1b8e633a71", size = 920667, upload-time = "2025-10-06T20:22:34.444Z" }, +] + [[package]] name = "tqdm" version = "4.67.1"