Model-Transfer-Adaptability (haoyifan) · Commits

Commit 12b90b92, authored Apr 07, 2023 by Zhihong Ma
fix: modify resnet and module for trail
parent f641275d
Showing 2 changed files with 62 additions and 65 deletions:
  mzh/module.py   +3   -0
  mzh/resnet.py   +59  -65
mzh/module.py
@@ -527,6 +527,8 @@ class QLinear(QModule):
+# qi needs to be added here. For ResNet, not every QReLU can skip tracking qi (with the residual structure an elementwise add is performed, so the qo of some earlier layer cannot simply be reused as this layer's qi) / qo can still be left out, it differs little from qi
+# qi and qo need to be added. For ResNet, not every QReLU can skip tracking qi and qo
 class QReLU(QModule):
     def __init__(self, qi=False, num_bits=None, n_exp=4, mode=1):
@@ -542,6 +544,7 @@ class QReLU(QModule):
         if not hasattr(self, 'qi') and qi is None:
             raise ValueError('qi is not existed, should be provided.')
+        # if qi is not None, take the externally supplied value; otherwise keep using the internally tracked qi
         if qi is not None:
             self.qi = qi
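The two added comments explain when QReLU must track or accept an input quantizer: with a residual elementwise add, the qo of an earlier layer cannot simply stand in for this layer's qi. As a rough illustration of the override behaviour in the second hunk (a sketch only, not the project's actual code, with QParam as a hypothetical stand-in for its quantization-parameter class):

    class QParam:  # hypothetical placeholder for the real quantization-parameter object
        def __init__(self, num_bits=None):
            self.num_bits = num_bits

    class QReLUSketch:
        def __init__(self, qi=False, num_bits=None):
            if qi:  # track an input quantizer only when it cannot be reused from upstream
                self.qi = QParam(num_bits)

        def freeze(self, qi=None):
            if not hasattr(self, 'qi') and qi is None:
                raise ValueError('qi is not existed, should be provided.')
            if qi is not None:  # an externally supplied qi (e.g. the previous layer's qo) takes priority
                self.qi = qi    # otherwise the internally calibrated qi is kept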
mzh/resnet.py
@@ -215,8 +215,8 @@ class ResNet(nn.Module):
         print('resnet init:' + str(GlobalVariables.SELF_INPLANES))
         # input layer
-        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
-                               bias=False)
-        self.bn1 = nn.BatchNorm2d(16)
+        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
+                               bias=True)
+        # self.bn1 = nn.BatchNorm2d(16)
         self.relu = nn.ReLU()
         # residual layers (4 stages, each stage contains 6n+2 convolutional layers)
@@ -224,17 +224,15 @@ class ResNet(nn.Module):
         # self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
         # self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
         # self.layer4 = self._make_layer(block, 128, layers[3], stride=2)
-        self.layer1 = MakeLayer(block, 16, layers[0])
-        self.layer2 = MakeLayer(block, 32, layers[1], stride=2)
-        self.layer3 = MakeLayer(block, 64, layers[2], stride=2)
-        self.layer4 = MakeLayer(block, 128, layers[3], stride=2)
+        self.layer1 = MakeLayer(block, 16, layers[0], n_exp=self.n_exp, mode=self.mode)
+        self.layer2 = MakeLayer(block, 32, layers[1], stride=2, n_exp=self.n_exp, mode=self.mode)
+        self.layer3 = MakeLayer(block, 64, layers[2], stride=2, n_exp=self.n_exp, mode=self.mode)
+        self.layer4 = MakeLayer(block, 128, layers[3], stride=2, n_exp=self.n_exp, mode=self.mode)
         # classification layer
         self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
         self.fc = nn.Linear(128 * block.expansion, num_classes)
-        # self.layers_to_quantize = [self.conv1, self.bn1, self.relu, self.layer1, self.layer2, self.layer3, self.layer4, self.avgpool, self.fc]
         # parameter initialization
         for m in self.modules():
@@ -271,7 +269,7 @@ class ResNet(nn.Module):
     def forward(self, x):
         # input layer
         x = self.conv1(x)
-        x = self.bn1(x)
+        # x = self.bn1(x)
         x = self.relu(x)
         # compared with the ImageNet version there is no maxpool here, because CIFAR-10 images are already small and pooling again would make them too small
@@ -291,8 +289,11 @@ class ResNet(nn.Module):
         return out

     def quantize(self, num_bits=8):
-        self.qconvbnrelu1 = QConvBNReLU(self.conv1, self.bn1, qi=True, qo=True, num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)
+        # self.qconvbnrelu1 = QConvBNReLU(self.conv1,self.bn1,qi=True,qo=True,num_bits=num_bits,n_exp=self.n_exp, mode=self.mode)
+        self.qconv1 = QConv2d(self.conv1, qi=True, qo=True, num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)
         # num_bits is not passed in here; needs to be fixed
+        self.qrelu1 = QReLU(n_exp=self.n_exp, mode=self.mode)
         self.layer1.quantize(num_bits=num_bits)
         self.layer2.quantize(num_bits=num_bits)
         self.layer3.quantize(num_bits=num_bits)
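The retained note above ("num_bits is not passed in here; needs to be fixed") flags that the new qrelu1 falls back to QReLU's default num_bits=None. If the activation is meant to be quantized at the same width as the rest of the network, one plausible follow-up (an assumption, not part of this commit) would be:

    self.qrelu1 = QReLU(num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)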
@@ -306,7 +307,8 @@ class ResNet(nn.Module):
         # out = F.softmax(x, dim=1)
         # return out
-        x = self.qconvbnrelu1(x)
+        x = self.qconv1(x)
+        x = self.qrelu1(x)
         x = self.layer1.quantize_forward(x)
         x = self.layer2.quantize_forward(x)
         x = self.layer3.quantize_forward(x)
@@ -320,20 +322,22 @@ class ResNet(nn.Module):
     def freeze(self):
-        self.qconvbnrelu1.freeze()  # being the first layer it has its own qi, so no qi needs to be supplied again when freezing
-        qo = self.layer1.freeze(qinput=self.qconvbnrelu1.qo)
+        self.qconv1.freeze()  # being the first layer it has its own qi, so no qi needs to be supplied again when freezing
+        self.qrelu1.freeze(self.qconv1.qo)
+        qo = self.layer1.freeze(qinput=self.qconv1.qo)
         qo = self.layer2.freeze(qinput=qo)
         qo = self.layer3.freeze(qinput=qo)
         qo = self.layer4.freeze(qinput=qo)
         self.qavgpool1.freeze(qo)
-        self.qfc1.freeze(qi=qo)
+        self.qfc1.freeze(qi=self.qavgpool1.qo)

     def fakefreeze(self):
         pass

     def quantize_inference(self, x):
-        qx = self.qconvbnrelu1.qi.quantize_tensor(x, mode=self.mode)
-        qx = self.qconvbnrelu1.quantize_inference(qx)
+        qx = self.qconv1.qi.quantize_tensor(x, mode=self.mode)
+        qx = self.qconv1.quantize_inference(qx)
+        qx = self.qrelu1.quantize_inference(qx)
         qx = self.layer1.quantize_inference(qx)
         qx = self.layer2.quantize_inference(qx)
         qx = self.layer3.quantize_inference(qx)
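Seen together, freeze() threads the quantization parameters through the model: every wrapper is frozen against the output quantizer (qo) of whatever feeds it, and each stage returns its final qo for the next one, while quantize_inference() quantizes the input once with the first wrapper's qi and then stays in the quantized domain. Assuming the surrounding, un-diffed code follows the usual calibrate-then-freeze flow for this kind of post-training quantization (the model constructor, calibration loader, and test batch below are placeholders, not names from this repository), a typical calling sequence would be roughly:

    model = build_resnet_for_cifar10()          # hypothetical constructor
    model.quantize(num_bits=8)                  # attach the QConv2d / QReLU / ... wrappers
    model.eval()
    for imgs, _ in calib_loader:                # calibration: qi/qo statistics are collected here
        model.quantize_forward(imgs)
    model.freeze()                              # chain qo -> qi through conv1, layer1..layer4, avgpool, fc
    out = model.quantize_inference(test_batch)  # quantized-domain forward pass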
@@ -360,13 +364,13 @@ class BasicBlock(nn.Module):
         # first convolutional layer
-        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
-                               padding=1, bias=False)
-        self.bn1 = nn.BatchNorm2d(planes)
+        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
+                               padding=1, bias=True)
+        # self.bn1 = nn.BatchNorm2d(planes)
         # second convolutional layer
-        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
-                               padding=1, bias=False)
-        self.bn2 = nn.BatchNorm2d(planes)
+        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
+                               padding=1, bias=True)
+        # self.bn2 = nn.BatchNorm2d(planes)
         # shortcut
         self.relu = nn.ReLU()
@@ -380,11 +384,12 @@ class BasicBlock(nn.Module):
         identity = x

         out = self.conv1(x)
-        out = self.bn1(out)
+        # out = self.bn1(out)
         out = self.relu(out)

         out = self.conv2(out)
-        out = self.bn2(out)
+        # out = self.bn2(out)

         if self.downsample is not None:
             identity = self.downsample(identity)
@@ -395,54 +400,64 @@ class BasicBlock(nn.Module):
         return out

     def quantize(self, num_bits=8):
-        self.qconvbnrelu1 = QConvBNReLU(self.conv1, self.bn1, qi=False, qo=True, num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)
-        self.qconvbn1 = QConvBN(self.conv2, self.bn2, qi=False, qo=True, num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)
+        # self.qconvbnrelu1 = QConvBNReLU(self.conv1,self.bn1,qi=False,qo=True,num_bits=num_bits,n_exp=self.n_exp,mode=self.mode)
+        # self.qconvbn1 = QConvBN(self.conv2,self.bn2,qi=False,qo=True,num_bits=num_bits,n_exp=self.n_exp,mode=self.mode)
+        self.qconv1 = QConv2d(self.conv1, qi=False, qo=True, num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)
+        self.qrelu1 = QReLU(n_exp=self.n_exp, mode=self.mode)
+        self.qconv2 = QConv2d(self.conv2, qi=False, qo=True, num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)

         if self.downsample is not None:
-            self.qconvbn2 = QConvBN(self.downsample[0], self.downsample[1], qi=False, qo=True, num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)
+            # self.qconvbn2 = QConvBN(self.downsample[0],self.downsample[1],qi=False,qo=True,num_bits=num_bits,n_exp=self.n_exp,mode=self.mode)
+            self.qconv3 = QConv2d(self.downsample[0], qi=False, qo=True, num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)

-        self.qrelu1 = QReLU(n_exp=self.n_exp, mode=self.mode)
+        self.qrelu2 = QReLU(qi=True, num_bits=num_bits, n_exp=self.n_exp, mode=self.mode)
     def quantize_forward(self, x):
         identity = x
-        out = self.qconvbnrelu1(x)
-        out = self.qconvbn1(out)
+        # out = self.qconvbnrelu1(x)
+        # out = self.qconvbn1(out)
+        out = self.qconv1(x)
+        out = self.qrelu1(out)
+        out = self.qconv2(out)
         if self.downsample is not None:
-            identity = self.qconvbn2(identity)
+            identity = self.qconv3(identity)
         # residual add
         out = identity + out  # a dedicated elementwise-add transform still needs to be written here; to be revised later
-        out = self.qrelu1(out)
+        out = self.qrelu2(out)
         return out
     def freeze(self, qinput):
         # the qconvbnrelu1 here could actually reuse the previous layer's qo, but passing it through felt awkward, so it was not used
         # still needs careful checking
-        self.qconvbnrelu1.freeze(qi=qinput)  # must take the last qo of the previous module
-        self.qconvbn1.freeze(qi=self.qconvbnrelu1.qo)
+        self.qconv1.freeze(qi=qinput)  # must take the last qo of the previous module
+        self.qrelu1.freeze(self.qconv1.qo)
+        self.qconv2.freeze(qi=self.qconv1.qo)
         if self.downsample is not None:
-            self.qconvbn2.freeze(qi=self.qconvbn1.qo)
-            self.qrelu1.freeze(self.qconvbn2.qo)
-            return self.qconvbn2.qo
+            self.qconv3.freeze(qi=self.qconv2.qo)
+            self.qrelu2.freeze()  # this qo is not really right; qrelu2 should probably keep its own record of qi, because the result is identity + out and its actual range is not equal to qconv3.qo
+            return self.qrelu2.qi  # QReLU sets no qo, only qi
         else:
-            self.qrelu1.freeze(self.qconvbn1.qo)
-            return self.qconvbn1.qo
+            self.qrelu2.freeze()
+            return self.qrelu2.qi

     def quantize_inference(self, x):
         # the initial quantize_tensor and dequantize_tensor do not seem necessary here, since this is not the first/last layer; as long as every intermediate layer stays in the quantized domain no such handling is needed
         identity = x
-        out = self.qconvbnrelu1.quantize_inference(x)
-        out = self.qconvbn1.quantize_inference(out)
+        out = self.qconv1.quantize_inference(x)
+        out = self.qrelu1.quantize_inference(out)
+        out = self.qconv2.quantize_inference(out)
         if self.downsample is not None:
-            identity = self.qconvbn2.quantize_inference(identity)
+            identity = self.qconv3.quantize_inference(identity)
         out = identity + out  # a dedicated elementwise-add transform still needs to be written here; to be revised later
-        out = self.qrelu1.quantize_inference(out)
+        out = self.qrelu2.quantize_inference(out)
         return out
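Both the calibration path and the integer path carry the same note on the residual connection: out = identity + out adds tensors produced under different quantizers, so a dedicated elementwise-add transform is still missing. The standard way to handle this (a generic sketch, not code from this repository) is to bring both operands into the output quantizer's domain before adding:

    import torch

    def quantized_elementwise_add(qa, sa, za, qb, sb, zb, so, zo, num_bits=8):
        # qa, qb: quantized operands; (sa, za), (sb, zb): their scales / zero points;
        # (so, zo): scale / zero point chosen for the sum (e.g. calibrated on identity + out)
        acc = (sa / so) * (qa.float() - za) + (sb / so) * (qb.float() - zb) + zo
        qmin, qmax = 0, 2 ** num_bits - 1          # unsigned asymmetric range assumed
        return torch.clamp(acc.round(), qmin, qmax)

This also matches the freeze() comment above: because the sum's real range differs from qconv3.qo, the add needs its own calibrated quantizer, which is what constructing qrelu2 with qi=True provides.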
@@ -501,14 +516,14 @@ class Bottleneck(nn.Module):
 class MakeLayer(nn.Module):
     def __init__(self, block, planes, blocks, stride=1, n_exp=4, mode=1):
         super(MakeLayer, self).__init__()
         print('makelayer init:' + str(GlobalVariables.SELF_INPLANES))
         self.downsample = None
         if stride != 1 or GlobalVariables.SELF_INPLANES != planes * block.expansion:
             self.downsample = nn.Sequential(
-                nn.Conv2d(GlobalVariables.SELF_INPLANES, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
-                nn.BatchNorm2d(planes * block.expansion)
+                nn.Conv2d(GlobalVariables.SELF_INPLANES, planes * block.expansion, kernel_size=1, stride=stride, bias=True)
+                # nn.BatchNorm2d(planes * block.expansion)
             )
         self.n_exp = n_exp
         self.mode = mode
@@ -519,27 +534,6 @@ class MakeLayer(nn.Module):
             self.blockdict['block' + str(i + 1)] = block(inplanes=GlobalVariables.SELF_INPLANES, planes=planes, n_exp=self.n_exp, mode=self.mode)  # the blocks are instantiated here
-    # def _make_layer(self, block, planes, blocks, stride=1):
-    #     downsample = None
-    #     # stride is the stride of the convolution, self.inplanes is the number of input channels of the current residual block,
-    #     # and planes * block.expansion is its number of output channels; so downsampling is needed whenever stride != 1 or self.inplanes != planes * block.expansion
-    #     # within this layer every residual block except the first has equal input and output channel counts and the same stride (all 1 or 2); the input size of these conv layers stays fixed while the output height/width shrinks as residual blocks are stacked
-    #     if stride != 1 or SELF_INPLANES != planes * block.expansion:
-    #         downsample = nn.Sequential(
-    #             nn.Conv2d(SELF_INPLANES, planes * block.expansion,
-    #                       kernel_size=1, stride=stride, bias=False),
-    #             nn.BatchNorm2d(planes * block.expansion),
-    #         )
-    #     layers = []
-    #     layers.append(block(SELF_INPLANES, planes, stride, downsample))
-    #     SELF_INPLANES = planes * block.expansion
-    #     for _ in range(1, blocks):  # number of blocks
-    #         layers.append(block(SELF_INPLANES, planes))
-    #     return nn.Sequential(*layers)

     def forward(self, x):
         for _, layer in self.blockdict.items():
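MakeLayer's own quantize/freeze/quantize_inference methods fall outside this diff, but ResNet.freeze() above calls self.layer1.freeze(qinput=...) and chains the returned quantizer into the next layer, so MakeLayer presumably performs the same hand-off across the blocks in blockdict. A minimal sketch of that chaining (assumed, not shown in this commit) could look like:

    def freeze(self, qinput):
        qo = qinput
        for _, blk in self.blockdict.items():   # same iteration order as forward()
            qo = blk.freeze(qinput=qo)          # each block consumes the previous block's qo as its qi
        return qo                               # handed on to the next MakeLayer / avgpool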