diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0435884..6a3921d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -55,7 +55,7 @@ repos:
       - id: python-use-type-annotations
       - id: text-unicode-replacement-char
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.3.0
+    rev: v2.4.0
     hooks:
       - id: codespell
         additional_dependencies: ["tomli"]
diff --git a/README.md b/README.md
index 7fc1605..fcace5e 100644
--- a/README.md
+++ b/README.md
@@ -49,6 +49,21 @@ pip install uv
 
 ## Running the Benchmark
 
+To reproduce the benchmarks, you'll need the ImageNet validation dataset:
+
+Download the validation set (50,000 images, ~6.3GB):
+
+```bash
+wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar
+```
+
+Create a directory and extract the images:
+
+```bash
+mkdir -p imagenet/val
+tar -xf ILSVRC2012_img_val.tar -C imagenet/val
+```
+
 The benchmark script creates separate virtual environments for each library and
 runs tests independently:
 