#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Copyright (C) ansjer corp Video Technology Co., Ltd. All rights reserved.
@AUTHOR: ASJRD018
@NAME: AnsjerFormal
@software: PyCharm
@DATE: 2019/4/1 9:41
@Version: python3.6
@MODIFY RECORD: ansjer dev
@file: xls.py
@Contact: chanjunkai@163.com
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # if the pooling window is square, a single number is enough
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # flatten all dimensions except the batch dimension before the fully connected layers
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features


net = Net()
print(net)
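
# A minimal usage sketch (assumption: this LeNet-style network expects 32x32
# single-channel inputs, so the conv/pool stages leave 16 * 5 * 5 features for fc1).
dummy_input = torch.randn(1, 1, 32, 32)  # batch of one 32x32 grayscale image
output = net(dummy_input)
print(output.shape)  # expected: torch.Size([1, 10])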