-
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathinference_znet.py
More file actions
157 lines (128 loc) · 5.07 KB
/
inference_znet.py
File metadata and controls
157 lines (128 loc) · 5.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
from copy import deepcopy
from datetime import datetime
from pathlib import Path
import cv2
import numpy as np
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import tqdm
import typer
from torch.utils.tensorboard import SummaryWriter
from effdet.data.dataset import ImagePoseDatset
from effdet.data.loader import DetectionFastCollate
from effdet.data.parsers import PoseMeParserCfg
from effdet.data.transforms import (
transforms_z_val_partial,
CropToBBox,
Resize,
)
from effdet.znet import ZRegressionNet
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
app = typer.Typer(no_args_is_help=True)


@app.command()
def infer(
    checkpoint: Path = typer.Argument(..., help="Path to checkpoint"),
    data_dir: Path = typer.Argument(
        Path("../../data/fleckenzwerg_dataset_imagepose_cleaned"),
        help="Path to data directory",
    ),
    split: str = typer.Option("train", help="Split to infer on"),
    batch_size: int = typer.Option(64, help="Batch size"),
    num_workers: int = typer.Option(0, help="Number of workers"),
):
    """Run z-regression inference over one dataset split.

    Loads the checkpoint into a ZRegressionNet on CUDA, evaluates MSE on
    the z coordinate over the chosen split, and saves a histogram of the
    denormalized outputs/targets next to the checkpoint file.
    """
    model = ZRegressionNet().to("cuda")
    timm.models.load_checkpoint(model, checkpoint.as_posix())

    split_dir = data_dir / split
    parser_cfg = PoseMeParserCfg(
        data_dir=split_dir, has_labels=True, extension=".png"
    )
    dataset = ImagePoseDatset(
        data_dir=split_dir,
        parser="poseme",
        transform=transforms_z_val_partial(),
        parser_kwargs={"cfg": parser_cfg},
    )
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        persistent_workers=num_workers > 0,
        collate_fn=DetectionFastCollate(),
    )

    loss, outputs, targets = validate_epoch(model, loader, F.mse_loss)

    # Histogram of predicted vs. target z values, stored beside the checkpoint.
    frame = pd.DataFrame({"outputs": outputs, "targets": targets})
    sns.histplot(frame, binwidth=5, binrange=(400, 470), kde=True)
    plt.savefig(checkpoint.parent / f"hist_{split}.png")
    plt.show(block=True)
def get_crops(batch_input, batch_target):
    """Extract one resized crop per positive annotation from a detection batch.

    For every item in the batch, and for every positive (``cls == 1``)
    annotation of that item, the image is cropped to the annotation's bbox,
    resized, and converted back to CHW layout.

    Args:
        batch_input: Batched image tensor, assumed (B, C, H, W) — the code
            transposes each item CHW -> HWC before cropping.
        batch_target: Dict of batched target tensors (must include "cls");
            each value is indexable along the batch dimension.

    Returns:
        Tuple ``(crop_inputs, crop_targets)``: a stacked (N, C, H, W) crop
        tensor and a dict of stacked per-crop target arrays.
    """
    crop_inputs = []
    crop_targets = []
    for item_idx in range(len(batch_input)):
        # `image` avoids shadowing the builtin `input`.
        image = batch_input[item_idx]
        target = {k: v[item_idx] for k, v in batch_target.items()}
        # One crop per positive detection in this image.
        for crop_idx in range(np.count_nonzero(target["cls"] == 1)):
            crop_input = deepcopy(image.cpu().numpy())
            crop_target = deepcopy({k: v.cpu().numpy() for k, v in target.items()})
            crop_input = crop_input.transpose(1, 2, 0)  # CHW to HWC
            crop_input, crop_target = CropToBBox(idx=crop_idx)(crop_input, crop_target)
            crop_input, crop_target = Resize()(crop_input, crop_target)
            # BUG FIX: the previous (2, 1, 0) permutation produced (C, W, H),
            # swapping height and width; HWC -> CHW is (2, 0, 1).
            crop_input = crop_input.transpose(2, 0, 1)  # HWC to CHW
            crop_inputs.append(crop_input)
            crop_targets.append(crop_target)
    # Stack into a batch
    crop_inputs = torch.from_numpy(np.stack(crop_inputs))
    crop_targets = {
        k: torch.from_numpy(np.stack([v[k] for v in crop_targets]))
        for k in crop_targets[0].keys()
    }
    return crop_inputs, crop_targets
def validate_epoch(
    model,
    loader,
    metric_fn,
):
    """Run one validation pass and collect denormalized z predictions.

    Manually applies the crop/resize/HWC2CHW steps (the loader uses the
    partial val transform), feeds the crops through ``model``, and tracks
    the metric plus absolute z error in real units.

    Returns:
        Tuple ``(avg_metric, outputs, targets)`` where outputs/targets are
        flat lists of denormalized z values over all crops.
    """
    loss_meter = timm.utils.AverageMeter()
    error_meter = timm.utils.AverageMeter()
    worst_error = 0
    # z-score normalization constants; presumably millimeters — TODO confirm.
    # An alternative mean of 0.340 * 1000 was previously used.
    z_mean = 0.460 * 1000
    z_std = 0.050 * 1000

    progress = tqdm.tqdm(loader, desc="Val Batch", position=2, postfix={"z_error": 0})
    model.eval()

    all_outputs = []
    all_targets = []
    with torch.no_grad():
        # Since we are using the partial transform, we need to manually
        # apply the crop, resize and HWC2CHW transforms.
        for batch_input, batch_target in progress:
            crop_input, crop_target = get_crops(batch_input, batch_target)
            print(f"Batch of {len(batch_input)} has {len(crop_input)} crops.")

            crop_input = crop_input.type(torch.float32).cuda()
            target_z = crop_target["translation"][:, 0, [2]].type(torch.float32).cuda()
            output_z = model(crop_input)
            metric = metric_fn(output_z, target_z)

            # Undo normalization so errors are reported in real units.
            pred = output_z.detach().cpu().numpy() * z_std + z_mean
            true = target_z.detach().cpu().numpy() * z_std + z_mean
            abs_error = np.abs(pred - true)

            all_outputs.extend(pred.flatten())
            all_targets.extend(true.flatten())

            n_crops = crop_input.size(0)
            loss_meter.update(metric.item(), n_crops)
            error_meter.update(abs_error.mean(), n_crops)
            worst_error = np.max([worst_error, abs_error.max()])
            progress.set_postfix(
                loss=loss_meter.avg, z_error=error_meter.avg, max_z_error=worst_error
            )
    return loss_meter.avg, all_outputs, all_targets
if __name__ == "__main__":
app()