Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 17 additions & 3 deletions modelconverter/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@
archive_from_model,
docker_build,
docker_exec,
get_default_target_version,
get_local_docker_image,
in_docker,
resolve_path,
upload_to_remote,
Expand Down Expand Up @@ -630,9 +632,21 @@ def launcher(
target = bound.arguments["target"]

if dev:
docker_build(
target.value, bare_tag=tag, version=tool_version, image=image
)
version = tool_version or get_default_target_version(target.value)
# CI invokes multiple dev docker commands per job; reuse the first
# local build so later commands don't rebuild the same image again.
if not (
os.getenv("CI") == "true"
and get_local_docker_image(
target.value,
bare_tag=tag,
version=version,
image=image,
)
):
docker_build(
target.value, bare_tag=tag, version=version, image=image
)

docker_exec(
target.value,
Expand Down
27 changes: 16 additions & 11 deletions modelconverter/packages/base_exporter.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
from typing import Any

import numpy as np
import onnx
from loguru import logger

from modelconverter.utils import (
Expand All @@ -20,6 +19,7 @@
RandomCalibrationConfig,
SingleStageConfig,
)
from modelconverter.utils.onnx_compatibility import save_onnx_model
from modelconverter.utils.subprocess import SubprocessResult
from modelconverter.utils.types import InputFileType, Target

Expand Down Expand Up @@ -129,7 +129,13 @@ def simplify_onnx(self) -> Path:
)
return self.input_model

onnx_sim, check = simplify(str(self.input_model))
try:
onnx_sim, check = simplify(str(self.input_model))
except Exception as e:
logger.warning(
f"Failed to simplify ONNX: {e}. Proceeding without simplification."
)
return self.input_model
if not check:
logger.warning(
"Provided ONNX could not be simplified. "
Expand All @@ -141,15 +147,14 @@ def simplify_onnx(self) -> Path:
self.input_model, "simplified.onnx"
)
logger.info(f"Saving simplified ONNX to {onnx_sim_path}")
if self.input_model.with_suffix(".onnx_data").exists():
onnx.save(
onnx_sim,
str(onnx_sim_path),
save_as_external_data=True,
location=f"{onnx_sim_path.name}_data",
)
else:
onnx.save(onnx_sim, str(onnx_sim_path))
save_onnx_model(
onnx_sim,
onnx_sim_path,
save_as_external_data=self.input_model.with_suffix(
".onnx_data"
).exists(),
location=f"{onnx_sim_path.name}_data",
)
return onnx_sim_path

@abstractmethod
Expand Down
1 change: 1 addition & 0 deletions modelconverter/packages/hailo/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,4 @@ nvidia-dali-tf-plugin-cuda120==1.49.0
protobuf==3.20.3
matplotlib==3.10.6
pyparsing==2.4.7
onnx==1.17.0 # Hailo SDK still imports onnx.mapping and still pins protobuf==3.20.3.
1 change: 1 addition & 0 deletions modelconverter/packages/rvc4/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,4 @@ psutil
numpy<2
polars
pytest # this is actually required by snpe packages
onnx==1.18.0 # SNPE's ONNX importer fails with onnx==1.21.0 in CI.
4 changes: 4 additions & 0 deletions modelconverter/utils/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,9 @@
docker_exec,
get_container_memory_available,
get_container_memory_limit,
get_default_target_version,
get_docker_image,
get_local_docker_image,
in_docker,
)
from .environ import environ
Expand Down Expand Up @@ -64,8 +66,10 @@
"get_archive_input",
"get_container_memory_available",
"get_container_memory_limit",
"get_default_target_version",
"get_docker_image",
"get_extra_quant_tensors",
"get_local_docker_image",
"get_metadata",
"get_protocol",
"guess_new_layout",
Expand Down
49 changes: 26 additions & 23 deletions modelconverter/utils/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from modelconverter.utils.filesystem_utils import resolve_path
from modelconverter.utils.layout import make_default_layout
from modelconverter.utils.metadata import Metadata, get_metadata
from modelconverter.utils.onnx_compatibility import save_onnx_model
from modelconverter.utils.types import (
DataType,
Encoding,
Expand Down Expand Up @@ -641,14 +642,9 @@ def _get_onnx_node_info(
f"Output value info for node '{node_name}' not found."
)

shape = [
dim.dim_value for dim in output_value_info.type.tensor_type.shape.dim
]
if any(dim == 0 for dim in shape):
raise ValueError(
"Dynamic shapes are not supported. "
f"Shape of node '{node_name}' is {shape}."
)
shape = _get_static_onnx_shape(
output_value_info.type.tensor_type, f"node '{node_name}'"
)
data_type = output_value_info.type.tensor_type.elem_type

return shape, DataType.from_onnx_dtype(data_type)
Expand All @@ -662,12 +658,7 @@ def _get_onnx_tensor_info(
def extract_tensor_info(
tensor_type: TypeProto.Tensor,
) -> tuple[list[int], DataType]:
shape = [dim.dim_value for dim in tensor_type.shape.dim]
if any(dim == 0 for dim in shape):
raise ValueError(
"Dynamic shapes are not supported. "
f"Shape of tensor '{tensor_name}' is {shape}."
)
shape = _get_static_onnx_shape(tensor_type, f"tensor '{tensor_name}'")
return shape, DataType.from_onnx_dtype(tensor_type.elem_type)

for tensor in chain(model.graph.input, model.graph.output):
Expand All @@ -687,6 +678,21 @@ def extract_tensor_info(
raise NameError(f"Tensor '{tensor_name}' not found in the ONNX model.")


def _get_static_onnx_shape(
tensor_type: TypeProto.Tensor, tensor_name: str
) -> list[int]:
shape = []
for dim in tensor_type.shape.dim:
if dim.HasField("dim_value") and dim.dim_value > 0:
shape.append(dim.dim_value)
else:
raise ValueError(
"Dynamic shapes are not supported. "
f"Shape of {tensor_name} is {[d.dim_value for d in tensor_type.shape.dim]}."
)
return shape


def _get_onnx_inter_info(
model_path: Path, name: str
) -> tuple[list[int] | None, DataType | None]:
Expand Down Expand Up @@ -739,12 +745,9 @@ def generate_renamed_onnx(
if output_name in rename_dict:
node.output[i] = rename_dict[output_name]

if model_data_path:
onnx.save(
model,
str(output_path),
save_as_external_data=True,
location=f"{output_path.name}_data",
)
else:
onnx.save(model, str(output_path))
save_onnx_model(
model,
output_path,
save_as_external_data=model_data_path is not None,
location=f"{output_path.name}_data",
)
45 changes: 44 additions & 1 deletion modelconverter/utils/docker_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,7 +261,7 @@
tmp_path: Path | None = None
try:
request = Request(url, headers={"User-Agent": "modelconverter"}) # noqa: S310
with urlopen(request, timeout=30) as response: # noqa: S310

Check failure on line 264 in modelconverter/utils/docker_utils.py

View workflow job for this annotation

GitHub Actions / semgrep/ci

Semgrep Issue

Detected a dynamic value being used with urllib. urllib supports 'file://' schemes, so a dynamic value controlled by a malicious actor may allow them to read arbitrary files. Audit uses of urllib calls to ensure user data cannot control the URLs, or consider using the 'requests' library instead.
if getattr(response, "status", 200) >= 400:
raise RuntimeError(
f"HTTP {response.status} while downloading {url}"
Expand Down Expand Up @@ -346,9 +346,26 @@
) -> str:
check_docker()

local_image = get_local_docker_image(target, bare_tag, version, image)
if local_image is not None:
return local_image

candidate_images = _get_candidate_docker_images(
target, bare_tag, version, image
)
return _get_or_build_docker_image(
target, bare_tag, version, candidate_images, image
)


def _get_candidate_docker_images(
target: Literal["rvc2", "rvc3", "rvc4", "hailo"],
bare_tag: str,
version: str,
image: str | None = None,
) -> list[str]:
tag_version = rvc4_tag_version(version) if target == "rvc4" else version
tag = f"{tag_version}-{bare_tag}"
client = get_docker_client_from_active_context()

if image is not None:
image_repo, image_tag = parse_repository_tag(image)
Expand All @@ -368,6 +385,21 @@
if tag_version != version and image_tag is None:
candidate_images.append(f"{image_repo}:{version}-{bare_tag}")

return candidate_images


def get_local_docker_image(
target: Literal["rvc2", "rvc3", "rvc4", "hailo"],
bare_tag: str,
version: str,
image: str | None = None,
) -> str | None:
check_docker()

candidate_images = _get_candidate_docker_images(
target, bare_tag, version, image
)
client = get_docker_client_from_active_context()
candidate_tags = set()
for candidate in candidate_images:
candidate_tags.add(candidate)
Expand All @@ -379,6 +411,17 @@
if tags:
return next(iter(tags))

return None


def _get_or_build_docker_image(
target: Literal["rvc2", "rvc3", "rvc4", "hailo"],
bare_tag: str,
version: str,
candidate_images: list[str],
image: str | None = None,
) -> str:
client = get_docker_client_from_active_context()
for candidate in candidate_images:
logger.warning(
f"Image '{candidate}' not found locally, pulling "
Expand Down
72 changes: 72 additions & 0 deletions modelconverter/utils/onnx_compatibility.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
from pathlib import Path

import ml_dtypes
import numpy as np
import onnx
from onnx.external_data_helper import convert_model_to_external_data


def ensure_onnx_helper_compatibility() -> None:
    """Backfill float-conversion helpers onto ``onnx.helper`` when absent.

    Some installed ``onnx`` versions do not expose
    ``float32_to_bfloat16`` / ``float32_to_float8e4m3``; when missing,
    equivalents backed by ``ml_dtypes`` casts are monkey-patched in so
    code written against the older helper API keeps working. Idempotent:
    attributes that already exist are never overwritten.
    """

    def _as_bits(value: float, dtype: np.dtype, container: np.dtype) -> int:
        # Cast the scalar to the narrow float type, then reinterpret the
        # raw bits as an unsigned integer of the same width.
        return np.asarray(value, dtype=dtype).view(container).item()

    patched = onnx.helper

    if not hasattr(patched, "float32_to_bfloat16"):

        def _float32_to_bfloat16(value: float) -> int:
            return _as_bits(value, ml_dtypes.bfloat16, np.uint16)

        patched.float32_to_bfloat16 = _float32_to_bfloat16  # type: ignore[attr-defined]

    if not hasattr(patched, "float32_to_float8e4m3"):
        # (fn, uz) flag combination -> concrete float8 e4m3 variant.
        variants = {
            (False, False): ml_dtypes.float8_e4m3,
            (True, False): ml_dtypes.float8_e4m3fn,
            (True, True): ml_dtypes.float8_e4m3fnuz,
            (False, True): ml_dtypes.float8_e4m3b11fnuz,
        }

        def float32_to_float8e4m3(
            value: float, *, fn: bool = True, uz: bool = False
        ) -> int:
            return _as_bits(value, variants[(fn, uz)], np.uint8)

        patched.float32_to_float8e4m3 = float32_to_float8e4m3  # type: ignore[attr-defined]


def save_onnx_model(
    model: onnx.ModelProto,
    output_path: str | Path,
    *,
    save_as_external_data: bool = False,
    location: str | None = None,
) -> None:
    """Serialize ``model`` to ``output_path``, optionally with external data.

    :param model: ONNX model proto to save. When external data is
        requested the proto is converted in place by
        ``convert_model_to_external_data`` before serialization.
    :param output_path: Destination ``.onnx`` file path (str or Path).
    :param save_as_external_data: When True, tensor data is written to a
        separate side-car file next to the model instead of being
        embedded in the ``.onnx`` file.
    :param location: File name for the side-car data file; defaults to
        ``"<model file name>_data"`` in the same directory.
    """
    output_path = Path(output_path)

    if save_as_external_data:
        # Side-car file lives next to the model, named `location` or
        # "<name>_data" when no explicit location was given.
        external_data_path = output_path.with_name(
            location or f"{output_path.name}_data"
        )
        # Remove any stale side-car first. NOTE(review): presumably onnx
        # appends tensor bytes to an existing external-data file, which
        # would corrupt the output on re-save — confirm against onnx's
        # external-data writer.
        if external_data_path.exists():
            external_data_path.unlink()
        # Point every tensor at the side-car file (size_threshold=0, so
        # no tensor stays embedded regardless of size).
        convert_model_to_external_data(
            model,
            all_tensors_to_one_file=True,
            location=external_data_path.name,
            size_threshold=0,
            convert_attribute=False,
        )
        # Serialize with the same external-data arguments so onnx.save
        # writes the tensor bytes into the same side-car file.
        onnx.save(
            model,
            str(output_path),
            save_as_external_data=True,
            all_tensors_to_one_file=True,
            location=external_data_path.name,
            size_threshold=0,
            convert_attribute=False,
        )
        return

    # Default path: a single self-contained .onnx file.
    onnx.save(model, str(output_path))
Loading
Loading