Instead of manually incrementing a counter variable, give each module a unique label.
FeepingCreature 2017-02-06 15:02:28 +01:00
parent dbd5f271c2
commit 47394959b5
1 changed file with 60 additions and 61 deletions
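
For illustration, a minimal standalone sketch of what this renaming does to a Sequential's parameter names (not part of the commit; the channel sizes and the '-' separator are invented for the example, since current PyTorch releases reject '.' inside names passed to add_module):

import torch.nn as nn

nc, ndf = 3, 64  # hypothetical channel counts

# Before: anonymous modules get positional names ('0', '1', ...),
# so parameters show up under opaque keys such as '0.weight'.
old = nn.Sequential(
    nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
    nn.LeakyReLU(0.2, inplace=True),
)

# After: each module is registered under a descriptive label,
# so the keys document the architecture themselves.
new = nn.Sequential()
new.add_module('initial-conv-{0}-{1}'.format(nc, ndf),
               nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
new.add_module('initial-relu-{0}'.format(ndf),
               nn.LeakyReLU(0.2, inplace=True))

print(list(old.state_dict().keys()))  # ['0.weight']
print(list(new.state_dict().keys()))  # ['initial-conv-3-64.weight']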


@@ -8,38 +8,37 @@ class DCGAN_D(nn.Container):
         self.ngpu = ngpu
         assert isize % 16 == 0, "isize has to be a multiple of 16"
-        main = nn.Sequential(
-            # input is nc x isize x isize
-            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
-            nn.LeakyReLU(0.2, inplace=True),
-        )
-        i, csize, cndf = 2, isize / 2, ndf
+        main = nn.Sequential()
+        # input is nc x isize x isize
+        main.add_module('initial.conv.{0}-{1}'.format(nc, ndf),
+                        nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
+        main.add_module('initial.relu.{0}'.format(ndf),
+                        nn.LeakyReLU(0.2, inplace=True))
+        csize, cndf = isize / 2, ndf

         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module(str(i),
+            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cndf),
                             nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
-            main.add_module(str(i+1),
+            main.add_module('extra-layers-{0}.{1}.batchnorm'.format(t, cndf),
                             nn.BatchNorm2d(cndf))
-            main.add_module(str(i+2),
+            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cndf),
                             nn.LeakyReLU(0.2, inplace=True))
-            i += 3

         while csize > 4:
             in_feat = cndf
             out_feat = cndf * 2
-            main.add_module(str(i),
+            main.add_module('pyramid.{0}-{1}.conv'.format(in_feat, out_feat),
                             nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
-            main.add_module(str(i+1),
+            main.add_module('pyramid.{0}.batchnorm'.format(out_feat),
                             nn.BatchNorm2d(out_feat))
-            main.add_module(str(i+2),
+            main.add_module('pyramid.{0}.relu'.format(out_feat),
                             nn.LeakyReLU(0.2, inplace=True))
-            i+=3
             cndf = cndf * 2
             csize = csize / 2

         # state size. K x 4 x 4
-        main.add_module(str(i),
+        main.add_module('final.{0}-{1}.conv'.format(cndf, 1),
                         nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
         self.main = main
@@ -63,38 +62,39 @@ class DCGAN_G(nn.Container):
             cngf = cngf * 2
             tisize = tisize * 2
-        main = nn.Sequential(
-            # input is Z, going into a convolution
-            nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False),
-            nn.BatchNorm2d(cngf),
-            nn.ReLU(True),
-        )
+        main = nn.Sequential()
+        # input is Z, going into a convolution
+        main.add_module('initial.{0}-{1}.convt'.format(nz, cngf),
+                        nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
+        main.add_module('initial.{0}.batchnorm'.format(cngf),
+                        nn.BatchNorm2d(cngf))
+        main.add_module('initial.{0}.relu'.format(cngf),
+                        nn.ReLU(True))

-        i, csize, cndf = 3, 4, cngf
+        csize, cndf = 4, cngf
         while csize < isize//2:
-            main.add_module(str(i),
+            main.add_module('pyramid.{0}-{1}.convt'.format(cngf, cngf//2),
                             nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1, bias=False))
-            main.add_module(str(i+1),
+            main.add_module('pyramid.{0}.batchnorm'.format(cngf//2),
                             nn.BatchNorm2d(cngf//2))
-            main.add_module(str(i+2),
+            main.add_module('pyramid.{0}.relu'.format(cngf//2),
                             nn.ReLU(True))
-            i += 3
             cngf = cngf // 2
             csize = csize * 2

         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module(str(i),
+            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cngf),
                             nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
-            main.add_module(str(i+1),
+            main.add_module('extra-layers-{0}.{1}.batchnorm'.format(t, cngf),
                             nn.BatchNorm2d(cngf))
-            main.add_module(str(i+2),
+            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cngf),
                             nn.ReLU(True))
-            i += 3

-        main.add_module(str(i),
+        main.add_module('final.{0}-{1}.convt'.format(cngf, nc),
                         nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
-        main.add_module(str(i+1), nn.Tanh())
+        main.add_module('final.{0}.tanh'.format(nc),
+                        nn.Tanh())
         self.main = main

     def forward(self, input):
@@ -110,34 +110,34 @@ class DCGAN_D_nobn(nn.Container):
         self.ngpu = ngpu
         assert isize % 16 == 0, "isize has to be a multiple of 16"
-        main = nn.Sequential(
-            # input is nc x isize x isize
-            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
-            nn.LeakyReLU(0.2, inplace=True),
-        )
-        i, csize, cndf = 2, isize / 2, ndf
+        main = nn.Sequential()
+        # input is nc x isize x isize
+        main.add_module('initial.conv.{0}-{1}'.format(nc, ndf),
+                        nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
+        main.add_module('initial.relu.{0}'.format(ndf),
+                        nn.LeakyReLU(0.2, inplace=True))
+        csize, cndf = isize / 2, ndf

         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module(str(i),
+            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cndf),
                             nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
-            main.add_module(str(i+1),
+            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cndf),
                             nn.LeakyReLU(0.2, inplace=True))
-            i += 2

         while csize > 4:
             in_feat = cndf
             out_feat = cndf * 2
-            main.add_module(str(i),
+            main.add_module('pyramid.{0}-{1}.conv'.format(in_feat, out_feat),
                             nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
-            main.add_module(str(i+1),
+            main.add_module('pyramid.{0}.relu'.format(out_feat),
                             nn.LeakyReLU(0.2, inplace=True))
-            i+=2
             cndf = cndf * 2
             csize = csize / 2

         # state size. K x 4 x 4
-        main.add_module(str(i),
+        main.add_module('final.{0}-{1}.conv'.format(cndf, 1),
                         nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
         self.main = main
@@ -161,33 +161,32 @@ class DCGAN_G_nobn(nn.Container):
             cngf = cngf * 2
             tisize = tisize * 2
-        main = nn.Sequential(
-            # input is Z, going into a convolution
-            nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False),
-            nn.ReLU(True),
-        )
+        main = nn.Sequential()
+        main.add_module('initial.{0}-{1}.convt'.format(nz, cngf),
+                        nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
+        main.add_module('initial.{0}.relu'.format(cngf),
+                        nn.ReLU(True))

-        i, csize, cndf = 3, 4, cngf
+        csize, cndf = 4, cngf
         while csize < isize//2:
-            main.add_module(str(i),
+            main.add_module('pyramid.{0}-{1}.convt'.format(cngf, cngf//2),
                             nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1, bias=False))
-            main.add_module(str(i+1),
+            main.add_module('pyramid.{0}.relu'.format(cngf//2),
                             nn.ReLU(True))
-            i += 2
             cngf = cngf // 2
             csize = csize * 2

         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module(str(i),
+            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cngf),
                             nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
-            main.add_module(str(i+1),
+            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cngf),
                             nn.ReLU(True))
-            i += 2

-        main.add_module(str(i),
+        main.add_module('final.{0}-{1}.convt'.format(cngf, nc),
                         nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
-        main.add_module(str(i+1), nn.Tanh())
+        main.add_module('final.{0}.tanh'.format(nc),
+                        nn.Tanh())
         self.main = main

     def forward(self, input):
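
The practical payoff (a consequence of the change, not something stated in the commit message): with positional counters, adding or removing a layer, for example by changing n_extra_layers, shifts every later state_dict() key, so an older checkpoint either fails to load or maps weights onto the wrong layers; descriptive labels keep each key tied to its layer. Continuing the hypothetical sketch above, the labels also make the model self-describing on inspection:

for name, module in new.named_children():
    print(name, '->', module)
# initial-conv-3-64 -> Conv2d(3, 64, kernel_size=(4, 4), ...)
# initial-relu-64 -> LeakyReLU(negative_slope=0.2, inplace=True)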