Changes since Teledyne
This commit is contained in: parent bd5dc63cff, commit 1060f18033
203 changed files with 24395 additions and 0 deletions
higher/smart_aug/nets/wideresnet_cifar.py (new file, 119 lines)

@@ -0,0 +1,119 @@
"""
Wide ResNet for CIFAR in PyTorch.

Reference:
[1] S. Zagoruyko and N. Komodakis. Wide residual networks. In BMVC, 2016.
"""
import torch
import torch.nn as nn
import math

#from models.resnet_cifar import BasicBlock

def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)

class BasicBlock(nn.Module):
    """Standard two-layer residual block (conv-BN-ReLU-conv-BN plus shortcut)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample  # 1x1 projection when the shape changes, else None
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        # Project the identity branch when stride or channel count changes.
        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out

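# Illustrative shape check for BasicBlock (an added sketch, not part of the
# original file; the helper name is hypothetical): with stride=2 and a matching
# 1x1 projection, a block halves the spatial resolution while widening channels.
def _example_basicblock_shapes():
    down = nn.Sequential(
        nn.Conv2d(16, 32, kernel_size=1, stride=2, bias=False),
        nn.BatchNorm2d(32),
    )
    block = BasicBlock(16, 32, stride=2, downsample=down)
    out = block(torch.randn(1, 16, 32, 32))
    assert out.size() == torch.Size([1, 32, 16, 16])
    return out
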
class Wide_ResNet_Cifar(nn.Module):

    def __init__(self, block, layers, wfactor, num_classes=10):
        super(Wide_ResNet_Cifar, self).__init__()
        self.depth = layers[0] * 6 + 2
        self.widen_factor = wfactor

        # Stem: 3x3 conv keeping the 32x32 resolution, then three stages whose
        # base widths (16/32/64) are multiplied by the widening factor.
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16 * wfactor, layers[0])
        self.layer2 = self._make_layer(block, 32 * wfactor, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 64 * wfactor, layers[2], stride=2)
        self.avgpool = nn.AvgPool2d(8, stride=1)  # 8x8 feature map -> 1x1
        self.fc = nn.Linear(64 * block.expansion * wfactor, num_classes)

        # He initialization for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # The first block of a stage needs a 1x1 strided projection whenever it
        # changes resolution or channel count; the remaining blocks keep shape.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion)
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x

    def __str__(self):
        """Return the model name, e.g. Wide_ResNet_cifar20_10."""
        return "Wide_ResNet_cifar%d_%d" % (self.depth, self.widen_factor)


def wide_resnet_cifar(depth, width, **kwargs):
    # This file uses the 6n+2 depth convention (e.g. 20, 26, 32).
    assert (depth - 2) % 6 == 0
    n = int((depth - 2) / 6)
    return Wide_ResNet_Cifar(BasicBlock, [n, n, n], width, **kwargs)

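# Minimal usage sketch (an added, hypothetical helper for illustration): under
# the 6n+2 convention above, depth 26 and width 10 give n=4 blocks per stage
# with widened channel counts of 160/320/640.
def _example_wrn26_10():
    net = wide_resnet_cifar(26, 10)
    n_params = sum(p.numel() for p in net.parameters())
    print("%s: %d trainable parameters" % (net, n_params))
    return net
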
if __name__ == '__main__':
    # Smoke test: WRN-20-10 on a single CIFAR-sized input.
    net = wide_resnet_cifar(20, 10)
    y = net(torch.randn(1, 3, 32, 32))
    print(isinstance(net, Wide_ResNet_Cifar))
    print(y.size())
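    # Added checks (illustrative, not in the original file): the 32x32 input
    # should map to a (1, num_classes) logit tensor, and str(net) reports the
    # depth/width configuration.
    assert y.size() == torch.Size([1, 10])
    print(net)  # Wide_ResNet_cifar20_10
    print(sum(p.numel() for p in net.parameters()), "parameters")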