Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
coors=dict(min_shape=[5000, 3], opt_shape=[30000, 3], max_shape=[60000, 3]),
voxel_coors=dict(min_shape=[5000, 3], opt_shape=[30000, 3], max_shape=[60000, 3]),
inverse_map=dict(min_shape=[5000], opt_shape=[30000], max_shape=[60000]),
seg_logit=dict(min_shape=[5000, num_classes], opt_shape=[30000, num_classes], max_shape=[60000, num_classes]),
pred_probs=dict(min_shape=[5000, num_classes], opt_shape=[30000, num_classes], max_shape=[60000, num_classes]),
)

onnx_config = dict(
Expand All @@ -22,12 +22,12 @@
opset_version=16,
do_constant_folding=True,
input_names=["points", "coors", "voxel_coors", "inverse_map"],
output_names=["seg_logit"],
output_names=["pred_probs"],
dynamic_axes={
"points": {0: "num_points"},
"coors": {0: "num_points"},
"voxel_coors": {0: "num_unique_coors"},
"inverse_map": {0: "num_points"},
"seg_logit": {0: "num_points"},
"pred_probs": {0: "num_points"},
},
)
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,10 @@

num_classes = _base_.num_classes
tensorrt_config = dict(
points=dict(min_shape=[5000, 4], opt_shape=[60000, 4], max_shape=[120000, 4]),
coors=dict(min_shape=[5000, 3], opt_shape=[60000, 3], max_shape=[120000, 3]),
points=dict(min_shape=[5000, 4], opt_shape=[60000, 4], max_shape=[160000, 4]),
coors=dict(min_shape=[5000, 3], opt_shape=[60000, 3], max_shape=[160000, 3]),
voxel_coors=dict(min_shape=[3000, 3], opt_shape=[30000, 3], max_shape=[60000, 3]),
inverse_map=dict(min_shape=[5000], opt_shape=[60000], max_shape=[120000]),
seg_logit=dict(min_shape=[5000, num_classes], opt_shape=[60000, num_classes], max_shape=[120000, num_classes]),
inverse_map=dict(min_shape=[5000], opt_shape=[60000], max_shape=[160000]),
)

onnx_config = dict(
Expand All @@ -22,12 +21,12 @@
opset_version=16,
do_constant_folding=True,
input_names=["points", "coors", "voxel_coors", "inverse_map"],
output_names=["seg_logit"],
output_names=["pred_probs"],
dynamic_axes={
"points": {0: "num_points"},
"coors": {0: "num_points"},
"voxel_coors": {0: "num_unique_coors"},
"inverse_map": {0: "num_points"},
"seg_logit": {0: "num_points"},
"pred_probs": {0: "num_points"},
},
)
10 changes: 7 additions & 3 deletions projects/FRNet/deploy/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,8 +202,12 @@ def get_samples(self) -> List[Dict[str, Any]]:
samples: List[Dict[str, Any]] = []
for info in data_list:
pcd_path = os.path.join(self._dataset_dir, info["lidar_points"]["lidar_path"])
mask_path = os.path.join(self._dataset_dir, info.get("pts_semantic_mask_path"))
mask_categories = info.get("pts_semantic_mask_categories")
mask_path = info.get("pts_semantic_mask_path", None)
mask_categories = info.get("pts_semantic_mask_categories", None)
if mask_path is None or mask_categories is None:
raise ValueError(
f"Missing GT mask info for sample {pcd_path}. Check annotation file and dataset config."
)
for channel in lidar_sources:
samples.append(
{
Expand Down Expand Up @@ -233,7 +237,7 @@ def load_points(self, sample: Dict[str, Any]) -> npt.NDArray[np.float32]:

def load_gt(self, sample: Dict[str, Any]) -> Optional[npt.NDArray[np.int64]]:
"""Load merged GT mask, slice to source and remap via class_mapping."""
mask_path = os.path.join(self._dataset_dir, sample.get("pts_semantic_mask_path"))
mask_path = sample.get("pts_semantic_mask_path")
mask_categories = sample.get("pts_semantic_mask_categories")
if mask_path is None or mask_categories is None:
return None
Expand Down
2 changes: 1 addition & 1 deletion projects/FRNet/deploy/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ def main() -> None:
torch_model = TorchModel(model_cfg=model_cfg, checkpoint_path=args.checkpoint)
onnx_model = OnnxModel(
deploy_cfg=deploy_cfg,
model=torch_model.model,
model=torch_model.export_model,
batch_inputs_dict=representative_input,
onnx_path=onnx_path,
deploy=args.deploy,
Expand Down
2 changes: 1 addition & 1 deletion projects/FRNet/deploy/onnx_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def __init__(
self._session = ort.InferenceSession(onnx_path)

def inference(self, batch_inputs_dict: dict) -> npt.NDArray[np.float32]:
"""Run ONNX Runtime inference, returns logits (N, num_classes)."""
"""Run ONNX Runtime inference, returns probabilities (N, num_classes)."""
coors = batch_inputs_dict["coors"].cpu().numpy()
points = batch_inputs_dict["points"].cpu().numpy()
voxel_coors = batch_inputs_dict["voxel_coors"].cpu().numpy()
Expand Down
4 changes: 2 additions & 2 deletions projects/FRNet/deploy/postprocessing.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Postprocessing for FRNet deployment.

Converts raw segmentation logits to class predictions with optional
Converts per-class probabilities to class predictions with optional
score thresholding (below threshold -> ignore_index).
"""

Expand All @@ -22,7 +22,7 @@ def __init__(self, score_threshold: float = 0.0, ignore_index: int = 0) -> None:
self.logger = MMLogger.get_current_instance()

def postprocess(self, predictions: npt.NDArray[np.float32]) -> npt.NDArray[np.intp]:
"""Convert logits (N, num_classes) to per-point class indices (N,)."""
"""Convert probabilities (N, num_classes) to per-point class indices (N,)."""
t_start = time()
result = np.where(
np.max(predictions, axis=1) >= self._score_threshold,
Expand Down
24 changes: 21 additions & 3 deletions projects/FRNet/deploy/torch_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,21 +15,39 @@
from mmengine.logging import MMLogger


class ExportModel(torch.nn.Module):
"""Deployment wrapper that exposes probabilities as the model output."""

def __init__(self, model: torch.nn.Module) -> None:
super().__init__()
self.model = model

def forward(self, batch_inputs_dict: dict, data_samples: dict | None = None) -> torch.Tensor:
"""Forward pass with softmax applied to logits.

Returns:
Tensor of shape (N, num_classes) with per-point class probabilities.
"""
predictions = self.model(batch_inputs_dict)
return torch.softmax(predictions["seg_logit"], dim=-1)


class TorchModel:
"""FRNet PyTorch model wrapper."""

def __init__(self, model_cfg: Config, checkpoint_path: str) -> None:
    """Build the FRNet model from config + checkpoint and wrap it for export.

    Args:
        model_cfg: Full mm-style config whose ``model`` section describes
            the network to build.
        checkpoint_path: Path to the trained weights to load.
    """
    self.logger = MMLogger.get_current_instance()
    # Keep both the raw model and the probability-emitting export wrapper.
    built_model = self._build_model(model_cfg.model, checkpoint_path)
    self.model = built_model
    self.export_model = ExportModel(built_model)

def inference(self, batch_inputs_dict: dict) -> npt.NDArray[np.float32]:
"""Forward pass, returns segmentation logits (N, num_classes)."""
"""Forward pass, returns segmentation probabilities (N, num_classes)."""
t_start = time()
predictions = self.model(batch_inputs_dict)
predictions = self.export_model(batch_inputs_dict)
t_end = time()
latency = np.round((t_end - t_start) * 1e3, 2)
self.logger.info(f"Inference latency: {latency} ms")
return predictions["seg_logit"].cpu().detach().numpy()
return predictions.cpu().detach().numpy()

@staticmethod
def _build_model(model_cfg: dict, checkpoint_path: str) -> torch.nn.Module:
Expand Down
4 changes: 2 additions & 2 deletions projects/FRNet/deploy/trt_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,13 +151,13 @@ def _run_engine(self, tensors: Dict[str, Dict]) -> None:
self.logger.info(f"Inference latency: {latency} ms")

def inference(self, batch_inputs_dict: dict) -> npt.NDArray[np.float32]:
"""Run TensorRT inference, returns logits (N, num_classes)."""
"""Run TensorRT inference, returns probabilities (N, num_classes)."""
shapes_dict = {
"points": batch_inputs_dict["points"].shape,
"coors": batch_inputs_dict["coors"].shape,
"voxel_coors": batch_inputs_dict["voxel_coors"].shape,
"inverse_map": batch_inputs_dict["inverse_map"].shape,
"seg_logit": (batch_inputs_dict["points"].shape[0], self._deploy_cfg.num_classes),
"pred_probs": (batch_inputs_dict["points"].shape[0], self._deploy_cfg.num_classes),
}
tensors = self._allocate_buffers(shapes_dict)
self._transfer_input_to_device(batch_inputs_dict, tensors["input"])
Expand Down