
Commit

Add Inception_v3
huyvnphan committed Jul 11, 2019
1 parent 25986ef commit 39985cb
Showing 17 changed files with 238 additions and 136 deletions.
30 changes: 14 additions & 16 deletions .ipynb_checkpoints/CIFAR10-checkpoint.ipynb
@@ -32,7 +32,7 @@
"import torchvision\n",
"import torchvision.transforms as transforms\n",
"\n",
"from tqdm import tqdm_notebook as pbar\n",
"from tqdm import tqdm as pbar\n",
"from tensorboardX import SummaryWriter\n",
"from models import *"
]
@@ -204,9 +204,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Train on cuda if available\n",
"device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')\n",
"print(\"Using\", device)"
"model = inception_v3(pretrained=True)"
]
},
{
@@ -216,6 +214,17 @@
"## 6. Put everything together"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Train on cuda if available\n",
"device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')\n",
"print(\"Using\", device)"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -251,18 +260,7 @@
"metadata": {},
"outputs": [],
"source": [
"# test_model(model, train_params)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# for i, a_model in enumerate(all_models):\n",
"# train_params['description'] = descriptions[i]\n",
"# train_model(a_model, train_params)"
"test_model(model, train_params)"
]
},
{
26 changes: 12 additions & 14 deletions .ipynb_checkpoints/README-checkpoint.md
@@ -3,21 +3,20 @@
- I changed the *number of classes, filter size, stride, and padding* in the original code so that it works with CIFAR-10.
- I also share the **weights** of these models, so you can just load the weights and use them.

## Statistic of supported models
## Statistics of supported models
| No. | Model | Test Accuracy | Parameters | State Dict Size |
|-----|--------------|---------------|------------|-----------------|
| 1 | vgg11_bn | 92.61% | 128.813M | 491 MB |
| 2 | vgg13_bn | 94.27% | 128.998M | 492 MB |
| 3 | vgg16_bn | 94.07% | 134.310M | 512 MB |
| 4 | vgg19_bn | 94.25% | 139.622M | 533 MB |
| 5 | resnet18 | 93.48% | 11.174M | 43 MB |
| 6 | resnet34 | 93.82% | 21.282M | 81 MB |
| 7 | resnet50 | 94.38% | 23.521M | 90 MB |
| 8 | mobilenet_v2 | 93.85% | 2.237M | 9 MB |
| 9 | densenet121 | 94.76% | 6.956M | 27 MB |
| 10 | googlenet | 95.08% | 5.491M | 21 MB |

I will add more models...
| 1 | vgg11_bn | 92.61% | 128.813 M | 491 MB |
| 2 | vgg13_bn | 94.27% | 128.998 M | 492 MB |
| 3 | vgg16_bn | 94.07% | 134.310 M | 512 MB |
| 4 | vgg19_bn | 94.25% | 139.622 M | 533 MB |
| 5 | resnet18 | 93.48% | 11.174 M | 43 MB |
| 6 | resnet34 | 93.82% | 21.282 M | 81 MB |
| 7 | resnet50 | 94.38% | 23.521 M | 90 MB |
| 8 | mobilenet_v2 | 93.85% | 2.237 M | 9 MB |
| 9 | densenet121 | 94.76% | 6.956 M | 27 MB |
| 10 | googlenet | 95.08% | 5.491 M | 21 MB |
| 11 | inception_v3 | 95.41% | 21.640 M | 83 MB |

## How To Use

@@ -40,7 +39,6 @@ my_model = vgg11_bn(pretrained=True)
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])]
```


## Training Hyper-parameters
- Batch size: 256
- Number of epochs: 600
30 changes: 14 additions & 16 deletions CIFAR10.ipynb
@@ -32,7 +32,7 @@
"import torchvision\n",
"import torchvision.transforms as transforms\n",
"\n",
"from tqdm import tqdm_notebook as pbar\n",
"from tqdm import tqdm as pbar\n",
"from tensorboardX import SummaryWriter\n",
"from models import *"
]
@@ -204,9 +204,7 @@
"metadata": {},
"outputs": [],
"source": [
"# Train on cuda if available\n",
"device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')\n",
"print(\"Using\", device)"
"model = inception_v3(pretrained=True)"
]
},
{
@@ -216,6 +214,17 @@
"## 6. Put everything together"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Train on cuda if available\n",
"device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')\n",
"print(\"Using\", device)"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -251,18 +260,7 @@
"metadata": {},
"outputs": [],
"source": [
"# test_model(model, train_params)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# for i, a_model in enumerate(all_models):\n",
"# train_params['description'] = descriptions[i]\n",
"# train_model(a_model, train_params)"
"test_model(model, train_params)"
]
},
{
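The notebook cells above now build the model with `inception_v3(pretrained=True)`, pick a device, and call `test_model(model, train_params)`. The sketch below is a self-contained stand-in for that flow, assuming only this repo's `models` package; `test_model`, `train_params`, and the real data loaders live in notebook cells that are not part of this diff, so a random CIFAR-sized batch is used here purely as a shape check.

```python
# Minimal sketch of the notebook flow above: build the pretrained model,
# move it to a device, and sanity-check the output shape on a fake
# CIFAR-10 batch. This is not the repo's test_model implementation.
import torch

from models import inception_v3

# Same device-selection pattern as the notebook cell shown in the diff.
device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
print("Using", device)

model = inception_v3(pretrained=True).to(device).eval()

# CIFAR-10 images are 3 x 32 x 32; the modified stem keeps that resolution.
dummy_batch = torch.randn(8, 3, 32, 32, device=device)
with torch.no_grad():
    logits = model(dummy_batch)
print(logits.shape)  # expected: torch.Size([8, 10])
```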
26 changes: 12 additions & 14 deletions README.md
@@ -3,21 +3,20 @@
- I changed the *number of classes, filter size, stride, and padding* in the original code so that it works with CIFAR-10.
- I also share the **weights** of these models, so you can just load the weights and use them.

## Statistic of supported models
## Statistics of supported models
| No. | Model | Test Accuracy | Parameters | State Dict Size |
|-----|--------------|---------------|------------|-----------------|
| 1 | vgg11_bn | 92.61% | 128.813M | 491 MB |
| 2 | vgg13_bn | 94.27% | 128.998M | 492 MB |
| 3 | vgg16_bn | 94.07% | 134.310M | 512 MB |
| 4 | vgg19_bn | 94.25% | 139.622M | 533 MB |
| 5 | resnet18 | 93.48% | 11.174M | 43 MB |
| 6 | resnet34 | 93.82% | 21.282M | 81 MB |
| 7 | resnet50 | 94.38% | 23.521M | 90 MB |
| 8 | mobilenet_v2 | 93.85% | 2.237M | 9 MB |
| 9 | densenet121 | 94.76% | 6.956M | 27 MB |
| 10 | googlenet | 95.08% | 5.491M | 21 MB |

I will add more models...
| 1 | vgg11_bn | 92.61% | 128.813 M | 491 MB |
| 2 | vgg13_bn | 94.27% | 128.998 M | 492 MB |
| 3 | vgg16_bn | 94.07% | 134.310 M | 512 MB |
| 4 | vgg19_bn | 94.25% | 139.622 M | 533 MB |
| 5 | resnet18 | 93.48% | 11.174 M | 43 MB |
| 6 | resnet34 | 93.82% | 21.282 M | 81 MB |
| 7 | resnet50 | 94.38% | 23.521 M | 90 MB |
| 8 | mobilenet_v2 | 93.85% | 2.237 M | 9 MB |
| 9 | densenet121 | 94.76% | 6.956 M | 27 MB |
| 10 | googlenet | 95.08% | 5.491 M | 21 MB |
| 11 | inception_v3 | 95.41% | 21.640 M | 83 MB |

## How To Use

@@ -40,7 +39,6 @@ my_model = vgg11_bn(pretrained=True)
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])]
```


## Training Hyper-parameters
- Batch size: 256
- Number of epochs: 600
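For reference, here is one way the updated README's pieces fit together for the new model: a sketch that loads the bundled inception_v3 weights and evaluates on the CIFAR-10 test set with the normalization constants quoted above. The dataset root, batch size, and accuracy loop are illustrative assumptions, not settings taken from this repository.

```python
# Usage sketch based on the README snippet above. Assumes the repo's `models`
# package is importable and its pretrained weight file is present; the dataset
# root and batch size below are arbitrary choices for illustration.
import torch
import torchvision
import torchvision.transforms as transforms

from models import inception_v3

# Normalization constants from the README's "How To Use" section.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
])

test_set = torchvision.datasets.CIFAR10(root='data', train=False,
                                        download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=256,
                                          shuffle=False, num_workers=2)

model = inception_v3(pretrained=True)  # loads models/state_dicts/inception_v3.pt
model.eval()

correct, total = 0, 0
with torch.no_grad():
    for images, labels in test_loader:
        predictions = model(images).argmax(dim=1)
        correct += (predictions == labels).sum().item()
        total += labels.size(0)
print(f"Test accuracy: {correct / total:.2%}")
```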
2 changes: 1 addition & 1 deletion models/.ipynb_checkpoints/__init__-checkpoint.py
@@ -2,5 +2,5 @@
from .resnet import *
from .vgg import *
from .densenet import *
# from .inception import *
from .inception import *
from .googlenet import *
57 changes: 20 additions & 37 deletions models/.ipynb_checkpoints/inception-checkpoint.py
@@ -2,17 +2,10 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import load_state_dict_from_url


__all__ = ['Inception3', 'inception_v3']


model_urls = {
# Inception v3 ported from TensorFlow
'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
}

_InceptionOuputs = namedtuple('InceptionOuputs', ['logits', 'aux_logits'])


@@ -32,37 +25,25 @@ def inception_v3(pretrained=False, progress=True, **kwargs):
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
model = Inception3()
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' in kwargs:
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
else:
original_aux_logits = True
model = Inception3(**kwargs)
state_dict = load_state_dict_from_url(model_urls['inception_v3_google'],
progress=progress)
state_dict = torch.load('models/state_dicts/inception_v3.pt', map_location='cpu')
model.load_state_dict(state_dict)
if not original_aux_logits:
model.aux_logits = False
del model.AuxLogits
return model

return Inception3(**kwargs)

return model

class Inception3(nn.Module):

def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
## CIFAR10: aux_logits True->False
def __init__(self, num_classes=10, aux_logits=False, transform_input=False):
super(Inception3, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)

## CIFAR10: stride 2->1, padding 0 -> 1
self.Conv2d_1a_3x3 = BasicConv2d(3, 192, kernel_size=3, stride=1, padding=1)
# self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
# self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
# self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
# self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
@@ -99,18 +80,20 @@ def forward(self, x):
x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
# N x 3 x 299 x 299
x = self.Conv2d_1a_3x3(x)

## CIFAR10
# N x 32 x 149 x 149
x = self.Conv2d_2a_3x3(x)
# x = self.Conv2d_2a_3x3(x)
# N x 32 x 147 x 147
x = self.Conv2d_2b_3x3(x)
# x = self.Conv2d_2b_3x3(x)
# N x 64 x 147 x 147
x = F.max_pool2d(x, kernel_size=3, stride=2)
# x = F.max_pool2d(x, kernel_size=3, stride=2)
# N x 64 x 73 x 73
x = self.Conv2d_3b_1x1(x)
# x = self.Conv2d_3b_1x1(x)
# N x 80 x 73 x 73
x = self.Conv2d_4a_3x3(x)
# x = self.Conv2d_4a_3x3(x)
# N x 192 x 71 x 71
x = F.max_pool2d(x, kernel_size=3, stride=2)
# x = F.max_pool2d(x, kernel_size=3, stride=2)
# N x 192 x 35 x 35
x = self.Mixed_5b(x)
# N x 256 x 35 x 35
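The core of the CIFAR-10 adaptation above is the stem: the ImageNet version downsamples a 299x299 input to 35x35 across five convolutions and two max-pools before Mixed_5b, which would reduce a 32x32 CIFAR image to 1x1, so it is replaced by a single stride-1, padding-1 3x3 convolution straight to 192 channels. Below is a small shape check; BasicConv2d is approximated here with Conv2d + BatchNorm2d + ReLU, an assumption about its definition, which is not shown in this diff.

```python
# Shape check for the replacement stem used above: a 3x3, stride-1, padding-1
# convolution keeps the 32x32 CIFAR-10 resolution while producing the 192
# channels that Mixed_5b expects. BasicConv2d is approximated here.
import torch
import torch.nn as nn

cifar_stem = nn.Sequential(
    nn.Conv2d(3, 192, kernel_size=3, stride=1, padding=1, bias=False),
    nn.BatchNorm2d(192, eps=0.001),
    nn.ReLU(inplace=True),
)

x = torch.randn(1, 3, 32, 32)
print(cifar_stem(x).shape)  # torch.Size([1, 192, 32, 32]) -> fed into Mixed_5b
```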
2 changes: 1 addition & 1 deletion models/__init__.py
@@ -2,5 +2,5 @@
from .resnet import *
from .vgg import *
from .densenet import *
# from .inception import *
from .inception import *
from .googlenet import *
Binary file modified models/__pycache__/__init__.cpython-37.pyc
Binary file modified models/__pycache__/inception.cpython-37.pyc
Binary file modified models/__pycache__/shufflenetv2.cpython-37.pyc
