remove unused code
sangkeun00 committed Jun 5, 2023
1 parent d8c13b9 commit 68d23fd
Showing 1 changed file with 8 additions and 65 deletions.
examples/learning_to_reweight/model.py (73 changes: 8 additions & 65 deletions)
@@ -21,7 +21,7 @@ def forward(self, x):
 class BasicBlock(nn.Module):
     expansion = 1
 
-    def __init__(self, in_planes, planes, stride=1, option="A"):
+    def __init__(self, in_planes, planes, stride=1):
         super(BasicBlock, self).__init__()
         self.conv1 = nn.Conv2d(
             in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
@@ -34,29 +34,14 @@ def __init__(self, in_planes, planes, stride=1, option="A"):
 
         self.shortcut = nn.Sequential()
         if stride != 1 or in_planes != planes:
-            if option == "A":
-                """
-                For CIFAR10 ResNet paper uses option A.
-                """
-                self.shortcut = LambdaLayer(
-                    lambda x: functional.pad(
-                        x[:, :, ::2, ::2],
-                        (0, 0, 0, 0, planes // 4, planes // 4),
-                        "constant",
-                        0,
-                    )
-                )
-            elif option == "B":
-                self.shortcut = nn.Sequential(
-                    nn.Conv2d(
-                        in_planes,
-                        self.expansion * planes,
-                        kernel_size=1,
-                        stride=stride,
-                        bias=False,
-                    ),
-                    nn.BatchNorm2d(self.expansion * planes),
-                )
+            self.shortcut = LambdaLayer(
+                lambda x: functional.pad(
+                    x[:, :, ::2, ::2],
+                    (0, 0, 0, 0, planes // 4, planes // 4),
+                    "constant",
+                    0,
+                )
+            )
 
     def forward(self, x):
         out = functional.relu(self.bn1(self.conv1(x)))
@@ -100,48 +85,6 @@ def forward(self, x):
         return out
 
 
-class ResNet32MWN(nn.Module):
-    def __init__(self, num_classes=10, block=BasicBlock, num_blocks=[5, 5, 5]):
-        super(ResNet32MWN, self).__init__()
-        self.in_planes = 16
-        self.num_classes = num_classes
-
-        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
-        self.bn1 = nn.BatchNorm2d(16)
-        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
-        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
-        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
-        self.linear = nn.Linear(64, num_classes)
-
-        self.fc = nn.Linear(2 * num_classes, 1)
-
-        self.apply(_weights_init)
-
-    def _make_layer(self, block, planes, num_blocks, stride):
-        strides = [stride] + [1] * (num_blocks - 1)
-        layers = []
-        for stride in strides:
-            layers.append(block(self.in_planes, planes, stride))
-            self.in_planes = planes * block.expansion
-
-        return nn.Sequential(*layers)
-
-    def forward(self, x, y):
-        out = functional.relu(self.bn1(self.conv1(x)))
-        out = self.layer1(out)
-        out = self.layer2(out)
-        out = self.layer3(out)
-        out = functional.avg_pool2d(out, out.size()[3])
-        out = out.view(out.size(0), -1)
-        out = self.linear(out)
-        out = functional.softmax(out, dim=-1)
-
-        one_hot = functional.one_hot(y, num_classes=self.num_classes)
-        out = torch.cat([out, one_hot], dim=1)
-        out = self.fc(out)
-        return torch.sigmoid(out)
-
-
 class HiddenLayer(nn.Module):
     def __init__(self, input_size, output_size):
         super(HiddenLayer, self).__init__()

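For reference, the shortcut kept by this commit is the CIFAR-style "option A" identity mapping noted in the removed docstring: when a block changes resolution or width, the input is spatially subsampled with stride 2 and zero-padded along the channel dimension, rather than projected by the 1x1 convolution of the removed "option B" branch. Below is a minimal sketch of that padding pulled out as a standalone function; the helper name and the example shapes are illustrative assumptions, not part of the repository.

import torch
import torch.nn.functional as functional

# Sketch of the "option A" shortcut kept by this commit: subsample spatially
# by 2, then zero-pad the channel dimension so the shortcut output matches
# the main branch width (planes channels).
def pad_shortcut(x, planes):  # hypothetical helper, not defined in the repo
    return functional.pad(
        x[:, :, ::2, ::2],                       # stride-2 spatial subsampling
        (0, 0, 0, 0, planes // 4, planes // 4),  # pad channels on both sides
        "constant",
        0,
    )

x = torch.randn(2, 16, 32, 32)           # e.g. input to the first stride-2 block
print(pad_shortcut(x, planes=32).shape)  # torch.Size([2, 32, 16, 16])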