"""
-------------------------------------------------
   File Name: senet.py
   Author: Zhonghao Huang
   Date: 2019/9/9
   Description: SENet backbones (SENet154, SE-ResNet, SE-ResNeXt) built from
       Squeeze-and-Excitation bottleneck blocks, trimmed to return feature
       maps (no classification head).
-------------------------------------------------
"""

from __future__ import print_function, division, absolute_import
from collections import OrderedDict
import math

import torch
import torch.nn as nn
from torch.utils import model_zoo

__all__ = ['SENet', 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152',
           'se_resnext50_32x4d', 'se_resnext101_32x4d']

pretrained_settings = {
    'senet154': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnet50': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnet101': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnet152': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnext50_32x4d': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnext101_32x4d': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
}
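
# Illustrative only: the settings above describe how inputs are expected to be
# preprocessed for the pretrained weights. Assuming torchvision is available,
# a typical preprocessing pipeline would look like:
#
#   from torchvision import transforms
#   cfg = pretrained_settings['se_resnet50']['imagenet']
#   preprocess = transforms.Compose([
#       transforms.Resize(cfg['input_size'][1:]),       # 224 x 224
#       transforms.ToTensor(),                          # pixels in input_range [0, 1]
#       transforms.Normalize(cfg['mean'], cfg['std']),
#   ])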


class SEModule(nn.Module):
    """
    Squeeze-and-Excitation block: squeeze with global average pooling, excite
    with a two-layer bottleneck (1x1 convolutions) and a sigmoid gate, then
    rescale the input channel-wise.
    """

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
                             padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
                             padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x

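# A quick illustrative check of SEModule (not part of the model definition):
#
#   >>> se = SEModule(channels=256, reduction=16)
#   >>> se(torch.randn(2, 256, 56, 56)).shape   # shape is unchanged;
#   torch.Size([2, 256, 56, 56])                # channels are only rescaled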

class Bottleneck(nn.Module):
    """
    Base class for bottleneck blocks; implements the shared `forward()` pass
    (three conv-BN stages, SE recalibration, residual addition, ReLU).
    """

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out = self.se_module(out) + residual
        out = self.relu(out)

        return out


class SEBottleneck(Bottleneck):
    """
    Bottleneck for SENet154.
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None):
        super(SEBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes * 2)
        self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3,
                               stride=stride, padding=1, groups=groups,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(planes * 4)
        self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes * 4, reduction=reduction)
        self.downsample = downsample
        self.stride = stride


class SEResNetBottleneck(Bottleneck):
    """
    ResNet bottleneck with a Squeeze-and-Excitation module. It follows the
    Caffe implementation and applies `stride` in `conv1` rather than in
    `conv2` (the latter is what the torchvision implementation of ResNet
    does).
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None):
        super(SEResNetBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,
                               stride=stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,
                               groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes * 4, reduction=reduction)
        self.downsample = downsample
        self.stride = stride


class SEResNeXtBottleneck(Bottleneck):
    """
    ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None, base_width=4):
        super(SEResNeXtBottleneck, self).__init__()
        width = math.floor(planes * (base_width / 64)) * groups
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False,
                               stride=1)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
                               padding=1, groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(planes * 4, reduction=reduction)
        self.downsample = downsample
        self.stride = stride

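# Worked example of the width computation in SEResNeXtBottleneck for the
# 32x4d configuration (se_resnext50_32x4d / se_resnext101_32x4d), first stage:
#   planes=64, base_width=4, groups=32
#   width = floor(64 * (4 / 64)) * 32 = 4 * 32 = 128
# so the grouped 3x3 convolution in conv2 operates on 128 channels.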

class SENet(nn.Module):

    def __init__(self, block, layers, groups, reduction, dropout_p=0.2,
                 inplanes=128, input_3x3=True, downsample_kernel_size=3,
                 downsample_padding=1, last_stride=2):
        """
        Parameters
        ----------
        block (nn.Module): Bottleneck class.
            - For SENet154: SEBottleneck
            - For SE-ResNet models: SEResNetBottleneck
            - For SE-ResNeXt models: SEResNeXtBottleneck
        layers (list of ints): Number of residual blocks for the 4 stages of
            the network (layer1...layer4).
        groups (int): Number of groups for the 3x3 convolution in each
            bottleneck block.
            - For SENet154: 64
            - For SE-ResNet models: 1
            - For SE-ResNeXt models: 32
        reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
            - For all models: 16
        dropout_p (float or None): Drop probability for the Dropout layer.
            If `None` the Dropout layer is not used.
            - For SENet154: 0.2
            - For SE-ResNet models: None
            - For SE-ResNeXt models: None
        inplanes (int): Number of input channels for layer1.
            - For SENet154: 128
            - For SE-ResNet models: 64
            - For SE-ResNeXt models: 64
        input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
            a single 7x7 convolution in layer0.
            - For SENet154: True
            - For SE-ResNet models: False
            - For SE-ResNeXt models: False
        downsample_kernel_size (int): Kernel size for downsampling convolutions
            in layer2, layer3 and layer4.
            - For SENet154: 3
            - For SE-ResNet models: 1
            - For SE-ResNeXt models: 1
        downsample_padding (int): Padding for downsampling convolutions in
            layer2, layer3 and layer4.
            - For SENet154: 1
            - For SE-ResNet models: 0
            - For SE-ResNeXt models: 0
        last_stride (int): Stride of the first block in layer4. The original
            ImageNet models use 2; setting it to 1 preserves more spatial
            resolution in the output feature map, which is useful when the
            network is used as a feature extractor.
        """
        super(SENet, self).__init__()
        self.inplanes = inplanes
        if input_3x3:
            layer0_modules = [
                ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1,
                                    bias=False)),
                ('bn1', nn.BatchNorm2d(64)),
                ('relu1', nn.ReLU(inplace=True)),
                ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,
                                    bias=False)),
                ('bn2', nn.BatchNorm2d(64)),
                ('relu2', nn.ReLU(inplace=True)),
                ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,
                                    bias=False)),
                ('bn3', nn.BatchNorm2d(inplanes)),
                ('relu3', nn.ReLU(inplace=True)),
            ]
        else:
            layer0_modules = [
                ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2,
                                    padding=3, bias=False)),
                ('bn1', nn.BatchNorm2d(inplanes)),
                ('relu1', nn.ReLU(inplace=True)),
            ]
        # To preserve compatibility with Caffe weights `ceil_mode=True`
        # is used instead of `padding=1`.
        layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,
                                                    ceil_mode=True)))
        self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
        self.layer1 = self._make_layer(
            block,
            planes=64,
            blocks=layers[0],
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=1,
            downsample_padding=0
        )
        self.layer2 = self._make_layer(
            block,
            planes=128,
            blocks=layers[1],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding
        )
        self.layer3 = self._make_layer(
            block,
            planes=256,
            blocks=layers[2],
            stride=2,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding
        )
        self.layer4 = self._make_layer(
            block,
            planes=512,
            blocks=layers[3],
            stride=last_stride,
            groups=groups,
            reduction=reduction,
            downsample_kernel_size=downsample_kernel_size,
            downsample_padding=downsample_padding
        )
        # Note: `avg_pool` and `dropout` are kept from the original classifier
        # head but are not used in `forward()`, which returns the layer4
        # feature map directly.
        self.avg_pool = nn.AvgPool2d(7, stride=1)
        self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None

    def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,
                    downsample_kernel_size=1, downsample_padding=0):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=downsample_kernel_size, stride=stride,
                          padding=downsample_padding, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, groups, reduction, stride,
                            downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups, reduction))

        return nn.Sequential(*layers)

    def load_param(self, model_path):
        """Load pretrained weights, skipping the classifier ('last_linear') layers."""
        param_dict = torch.load(model_path)
        for key in param_dict:
            if 'last_linear' in key:
                continue
            self.state_dict()[key].copy_(param_dict[key])

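    # Illustrative use of load_param (the checkpoint path is hypothetical):
    #
    #   model = SENet(SEResNetBottleneck, [3, 4, 6, 3], groups=1, reduction=16,
    #                 dropout_p=None, inplanes=64, input_3x3=False,
    #                 downsample_kernel_size=1, downsample_padding=0,
    #                 last_stride=1)
    #   model.load_param('/path/to/se_resnet50_weights.pth')
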
    def forward(self, x):
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
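

# The factory functions listed in __all__ (senet154, se_resnet50, ...) are not
# defined in this excerpt. The demo below is a minimal sketch of how an
# SE-ResNeXt50 (32x4d) style backbone could be built with the SE-ResNeXt
# settings documented in the SENet docstring; the [3, 4, 6, 3] stage depths
# are the standard ResNet-50 layout.
if __name__ == '__main__':
    net = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16,
                dropout_p=None, inplanes=64, input_3x3=False,
                downsample_kernel_size=1, downsample_padding=0, last_stride=1)
    net.eval()
    with torch.no_grad():
        feat = net(torch.randn(1, 3, 256, 128))  # e.g. a person-image crop
    # With last_stride=1, a 256x128 input gives a 2048-channel, 16x8 feature map.
    print(feat.shape)  # torch.Size([1, 2048, 16, 8])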