-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcli.py
More file actions
180 lines (146 loc) · 6.5 KB
/
cli.py
File metadata and controls
180 lines (146 loc) · 6.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
import click
from mmengine.runner import Runner
from mmengine.config import Config
import datasets
import models
from PIL import Image
import torch
from torchvision import transforms
from datasets.pipeline import build_pipeline
import onnxruntime as ort
@click.group()
def cli():
    """A CLI for training, validating, and testing a model using MMEngine."""
    # Group entry point only — each subcommand below registers itself
    # via @cli.command(); nothing to do at the group level.
@cli.command()
@click.option('--config', type=click.Path(exists=True), required=True, help='Path to the configuration file.')
@click.option('--resume', type=click.Path(exists=True), required=False, help='Path to a checkpoint file to resume from.')
def train(config, resume):
    """Train the model.

    Loads the MMEngine config from --config and runs the training loop.
    If --resume is given, training resumes from that checkpoint.
    """
    cfg = Config.fromfile(config)
    if resume:
        # Configure resumption *before* building the Runner so MMEngine
        # restores optimizer state, LR schedulers, and the epoch/iteration
        # counters — calling runner.load_checkpoint() after from_cfg (as the
        # previous version did) restores only the model weights.
        cfg.load_from = resume
        cfg.resume = True
    runner = Runner.from_cfg(cfg)
    runner.train()
@cli.command()
@click.option('--config', type=click.Path(exists=True), required=True, help='Path to the configuration file.')
@click.option('--resume', type=click.Path(exists=True), required=True, help='Path to a checkpoint file to resume from.')
def val(config, resume):
    """Validate the model.

    Loads the MMEngine config, strips the training-only sections so the
    Runner does not build them, loads the checkpoint given via --resume,
    and runs the validation loop.
    """
    cfg = Config.fromfile(config)
    # Disable training: only the validation pipeline is needed.
    cfg.train_cfg = None
    cfg.train_dataloader = None
    cfg.optim_wrapper = None
    runner = Runner.from_cfg(cfg)
    # --resume is required=True, so a checkpoint path is always present;
    # the previous `if resume:` guard was dead code.
    runner.load_checkpoint(resume)
    runner.val()
@cli.command()
@click.option('--config', type=click.Path(exists=True), required=True, help='Path to the configuration file.')
@click.option('--resume', type=click.Path(exists=True), required=True, help='Path to a checkpoint file to resume from.')
def test(config, resume):
    """Test the model.

    Loads the MMEngine config, strips the training and validation
    sections, loads the checkpoint given via --resume, and runs the
    test loop.
    """
    cfg = Config.fromfile(config)
    # Disable training.
    cfg.train_dataloader = None
    cfg.train_cfg = None
    cfg.optim_wrapper = None
    # Disable validation.
    cfg.val_dataloader = None
    cfg.val_cfg = None
    cfg.val_evaluator = None
    runner = Runner.from_cfg(cfg)
    # --resume is required=True, so a checkpoint path is always present;
    # the previous `if resume:` guard was dead code.
    runner.load_checkpoint(resume)
    runner.test()
@cli.command()
@click.option('--config', type=click.Path(exists=True), required=True, help='Path to the configuration file.')
@click.option('--checkpoint', type=click.Path(exists=True), required=True, help='Path to the checkpoint file for the trained model.')
@click.option('--image-path', type=click.Path(exists=True), required=True, help='Path to the input image for inference.')
@click.option('--image-size', type=int, required=True, help='Image size')
@click.option('--device', type=str, required=False, default="cpu", help='Device (cpu | cuda)')
def inference(config, checkpoint, image_path, image_size, device):
    """Perform inference on a single image."""
    # Build a Runner from the config with every training/validation section
    # blanked out — only the forward pass is needed here.
    cfg = Config.fromfile(config)
    for section in ('train_dataloader', 'train_cfg', 'val_dataloader',
                    'val_cfg', 'val_evaluator', 'optim_wrapper'):
        setattr(cfg, section, None)
    runner = Runner.from_cfg(cfg)
    # Restore the trained weights, then switch to eval mode on the device.
    runner.load_checkpoint(checkpoint)
    model = runner.model.eval().to(device)
    # Resize / to-tensor / normalize with ImageNet statistics.
    to_input = transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    sample = to_input(Image.open(image_path).convert('RGB')).to(device)
    # Forward pass without gradient tracking; label -1 is a placeholder
    # since no ground-truth label exists at inference time.
    with torch.no_grad():
        result = model(imgs=[sample], labels=[-1], mode='predict')
    # assumes each prediction dict carries a 'pred_score' tensor whose argmax
    # is the class index — TODO confirm against the model's predict output
    _, predicted_class = result[0]['pred_score'].max(0)
    print(f'Predicted class: {predicted_class.item()}')
@cli.command()
@click.option('--config', type=click.Path(exists=True), required=True, help='Path to the configuration file.')
@click.option('--checkpoint', type=click.Path(exists=True), required=True, help='Path to the checkpoint file for the trained model.')
@click.option('--output', type=click.Path(), required=True, help='Path to save the exported ONNX model.')
@click.option('--image-size', type=int, required=True, help='Image size')
def export(config, checkpoint, output, image_size):
    """Export the model to ONNX format."""
    # Blank out every train/val/test section: export only needs the model.
    cfg = Config.fromfile(config)
    for section in ('train_dataloader', 'train_cfg',
                    'val_dataloader', 'val_cfg', 'val_evaluator',
                    'test_dataloader', 'test_cfg', 'test_evaluator',
                    'optim_wrapper'):
        setattr(cfg, section, None)
    runner = Runner.from_cfg(cfg)
    runner.load_checkpoint(checkpoint)
    # Export is traced on CPU with the model in eval mode.
    model = runner.model.eval().to('cpu')
    # One dummy RGB image of the requested size drives the trace.
    dummy_input = torch.randn(1, 3, image_size, image_size)
    # NOTE(review): torch.onnx.export invokes model(dummy_input) positionally;
    # confirm the model's forward accepts a bare tensor — the inference
    # command calls it with imgs=/labels=/mode= keywords instead.
    torch.onnx.export(
        model,
        dummy_input,
        output,
        export_params=True,
        opset_version=11,
        do_constant_folding=True,
        input_names=['input'],
        output_names=['output'],
    )
    print(f"Model exported to {output}")
@cli.command()
@click.option('--model', type=click.Path(exists=True), required=True, help='Path to the ONNX model file.')
@click.option('--image-path', type=click.Path(exists=True), required=True, help='Path to the input image for ONNX inference.')
@click.option('--image-size', type=int, required=True, help='Image size')
def onnx_inference(model, image_path: str, image_size: int):
    """Perform inference using the ONNX model on a single image."""
    # Open an ONNX Runtime session on the exported graph.
    session = ort.InferenceSession(model)
    # Same preprocessing as the PyTorch inference path (ImageNet stats).
    pipeline = transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    img = Image.open(image_path).convert('RGB')
    # ONNX Runtime consumes NumPy arrays with a batch axis,
    # hence unsqueeze(0) + .numpy().
    batch = pipeline(img).unsqueeze(0).numpy()
    # Bind by the graph's declared input/output names and run the session.
    input_name = session.get_inputs()[0].name
    output_name = session.get_outputs()[0].name
    scores = session.run([output_name], {input_name: batch})[0]
    # Highest score along the class axis is the prediction.
    predicted_class = scores.argmax(axis=1)[0]
    print(f'Predicted class from ONNX model: {predicted_class}')
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()