Remove dots from module names so the code runs with PyTorch 1.0.0.

kopytjuk 2018-12-25 22:59:23 +01:00
parent 72853533a0
commit 5981f2a3d5
1 changed file with 35 additions and 35 deletions
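
The underlying issue: PyTorch 1.0.0 reserves the dot as the hierarchy separator in state_dict keys, so nn.Module.add_module rejects any module name containing '.' (it raises KeyError: module name can't contain "."). A minimal sketch of the failure and of the colon-based scheme adopted below; the layer sizes are illustrative, not taken from this repository:

import torch.nn as nn

main = nn.Sequential()

# Old scheme: fails on PyTorch 1.0.0 with
#   KeyError: module name can't contain "."
# main.add_module('initial.conv.3-64', nn.Conv2d(3, 64, 4, 2, 1, bias=False))

# New scheme: ':' and '-' are ordinary characters in module names
main.add_module('initial:3-64:conv', nn.Conv2d(3, 64, 4, 2, 1, bias=False))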


@@ -10,35 +10,35 @@ class DCGAN_D(nn.Module):
         main = nn.Sequential()
         # input is nc x isize x isize
-        main.add_module('initial.conv.{0}-{1}'.format(nc, ndf),
+        main.add_module('initial:{0}-{1}:conv'.format(nc, ndf),
                         nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
-        main.add_module('initial.relu.{0}'.format(ndf),
+        main.add_module('initial:{0}:relu'.format(ndf),
                         nn.LeakyReLU(0.2, inplace=True))
         csize, cndf = isize / 2, ndf

         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cndf),
                             nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
-            main.add_module('extra-layers-{0}.{1}.batchnorm'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:batchnorm'.format(t, cndf),
                             nn.BatchNorm2d(cndf))
-            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cndf),
                             nn.LeakyReLU(0.2, inplace=True))

         while csize > 4:
             in_feat = cndf
             out_feat = cndf * 2
-            main.add_module('pyramid.{0}-{1}.conv'.format(in_feat, out_feat),
+            main.add_module('pyramid:{0}-{1}:conv'.format(in_feat, out_feat),
                             nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
-            main.add_module('pyramid.{0}.batchnorm'.format(out_feat),
+            main.add_module('pyramid:{0}:batchnorm'.format(out_feat),
                             nn.BatchNorm2d(out_feat))
-            main.add_module('pyramid.{0}.relu'.format(out_feat),
+            main.add_module('pyramid:{0}:relu'.format(out_feat),
                             nn.LeakyReLU(0.2, inplace=True))
             cndf = cndf * 2
             csize = csize / 2

         # state size. K x 4 x 4
-        main.add_module('final.{0}-{1}.conv'.format(cndf, 1),
+        main.add_module('final:{0}-{1}:conv'.format(cndf, 1),
                         nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
         self.main = main
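
After the rename, the discriminator builds cleanly and every submodule keeps a descriptive name. A quick smoke test, assuming the constructor signature DCGAN_D(isize, nz, nc, ndf, ngpu, n_extra_layers=0) and the models/dcgan.py module path used elsewhere in this repository:

from models.dcgan import DCGAN_D  # import path assumed from the repository layout

netD = DCGAN_D(isize=64, nz=100, nc=3, ndf=64, ngpu=1)
for name, _ in netD.main.named_children():
    print(name)  # 'initial:3-64:conv', 'initial:64:relu', 'pyramid:64-128:conv', ...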
@@ -65,36 +65,36 @@ class DCGAN_G(nn.Module):
         main = nn.Sequential()
         # input is Z, going into a convolution
-        main.add_module('initial.{0}-{1}.convt'.format(nz, cngf),
+        main.add_module('initial:{0}-{1}:convt'.format(nz, cngf),
                         nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
-        main.add_module('initial.{0}.batchnorm'.format(cngf),
+        main.add_module('initial:{0}:batchnorm'.format(cngf),
                         nn.BatchNorm2d(cngf))
-        main.add_module('initial.{0}.relu'.format(cngf),
+        main.add_module('initial:{0}:relu'.format(cngf),
                         nn.ReLU(True))

         csize, cndf = 4, cngf
         while csize < isize//2:
-            main.add_module('pyramid.{0}-{1}.convt'.format(cngf, cngf//2),
+            main.add_module('pyramid:{0}-{1}:convt'.format(cngf, cngf//2),
                             nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1, bias=False))
-            main.add_module('pyramid.{0}.batchnorm'.format(cngf//2),
+            main.add_module('pyramid:{0}:batchnorm'.format(cngf//2),
                             nn.BatchNorm2d(cngf//2))
-            main.add_module('pyramid.{0}.relu'.format(cngf//2),
+            main.add_module('pyramid:{0}:relu'.format(cngf//2),
                             nn.ReLU(True))
             cngf = cngf // 2
             csize = csize * 2

         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cngf),
                             nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
-            main.add_module('extra-layers-{0}.{1}.batchnorm'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:batchnorm'.format(t, cngf),
                             nn.BatchNorm2d(cngf))
-            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cngf),
                             nn.ReLU(True))

-        main.add_module('final.{0}-{1}.convt'.format(cngf, nc),
+        main.add_module('final:{0}-{1}:convt'.format(cngf, nc),
                         nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
-        main.add_module('final.{0}.tanh'.format(nc),
+        main.add_module('final:{0}:tanh'.format(nc),
                         nn.Tanh())
         self.main = main
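
One side effect worth noting: module names are embedded in state_dict keys, so checkpoints saved under the old dotted names will not load into the renamed generator with strict key matching. A sketch of a one-off key remap; the checkpoint file is hypothetical and the name pairs simply mirror this diff:

import torch

state = torch.load('netG_old.pth')  # hypothetical checkpoint saved before the rename
renames = {
    'main.initial.100-512.convt.weight': 'main.initial:100-512:convt.weight',
    # ... one entry per parameter of each renamed module, following the diff above
}
state = {renames.get(key, key): value for key, value in state.items()}
netG.load_state_dict(state)  # netG: a DCGAN_G built with matching sizes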
@@ -114,31 +114,31 @@ class DCGAN_D_nobn(nn.Module):
         main = nn.Sequential()
         # input is nc x isize x isize
-        main.add_module('initial.conv.{0}-{1}'.format(nc, ndf),
+        main.add_module('initial:{0}-{1}:conv'.format(nc, ndf),
                         nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
-        main.add_module('initial.relu.{0}'.format(ndf),
+        main.add_module('initial:{0}:relu'.format(ndf),
                         nn.LeakyReLU(0.2, inplace=True))
         csize, cndf = isize / 2, ndf

         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cndf),
                             nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
-            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cndf),
                             nn.LeakyReLU(0.2, inplace=True))

         while csize > 4:
             in_feat = cndf
             out_feat = cndf * 2
-            main.add_module('pyramid.{0}-{1}.conv'.format(in_feat, out_feat),
+            main.add_module('pyramid:{0}-{1}:conv'.format(in_feat, out_feat),
                             nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
-            main.add_module('pyramid.{0}.relu'.format(out_feat),
+            main.add_module('pyramid:{0}:relu'.format(out_feat),
                             nn.LeakyReLU(0.2, inplace=True))
             cndf = cndf * 2
             csize = csize / 2

         # state size. K x 4 x 4
-        main.add_module('final.{0}-{1}.conv'.format(cndf, 1),
+        main.add_module('final:{0}-{1}:conv'.format(cndf, 1),
                         nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
         self.main = main
@@ -164,30 +164,30 @@ class DCGAN_G_nobn(nn.Module):
             tisize = tisize * 2

         main = nn.Sequential()
-        main.add_module('initial.{0}-{1}.convt'.format(nz, cngf),
+        main.add_module('initial:{0}-{1}:convt'.format(nz, cngf),
                         nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
-        main.add_module('initial.{0}.relu'.format(cngf),
+        main.add_module('initial:{0}:relu'.format(cngf),
                         nn.ReLU(True))

         csize, cndf = 4, cngf
         while csize < isize//2:
-            main.add_module('pyramid.{0}-{1}.convt'.format(cngf, cngf//2),
+            main.add_module('pyramid:{0}-{1}:convt'.format(cngf, cngf//2),
                             nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1, bias=False))
-            main.add_module('pyramid.{0}.relu'.format(cngf//2),
+            main.add_module('pyramid:{0}:relu'.format(cngf//2),
                             nn.ReLU(True))
             cngf = cngf // 2
             csize = csize * 2

         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cngf),
                             nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
-            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cngf),
                             nn.ReLU(True))

-        main.add_module('final.{0}-{1}.convt'.format(cngf, nc),
+        main.add_module('final:{0}-{1}:convt'.format(cngf, nc),
                         nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
-        main.add_module('final.{0}.tanh'.format(nc),
+        main.add_module('final:{0}:tanh'.format(nc),
                         nn.Tanh())
         self.main = main
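
And a matching smoke test for the no-batchnorm generator, again assuming the constructor signature DCGAN_G_nobn(isize, nz, nc, ngf, ngpu, n_extra_layers=0) used elsewhere in this repository:

import torch
from models.dcgan import DCGAN_G_nobn  # import path assumed from the repository layout

netG = DCGAN_G_nobn(isize=64, nz=100, nc=3, ngf=64, ngpu=1)
z = torch.randn(2, 100, 1, 1)  # batch of latent vectors, shape (N, nz, 1, 1)
print(netG(z).shape)  # expected: torch.Size([2, 3, 64, 64])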