diff --git a/models/dcgan.py b/models/dcgan.py
index 1dd8dbf..f821c89 100644
--- a/models/dcgan.py
+++ b/models/dcgan.py
@@ -10,35 +10,35 @@ class DCGAN_D(nn.Module):
 
         main = nn.Sequential()
         # input is nc x isize x isize
-        main.add_module('initial.conv.{0}-{1}'.format(nc, ndf),
+        main.add_module('initial:{0}-{1}:conv'.format(nc, ndf),
                         nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
-        main.add_module('initial.relu.{0}'.format(ndf),
+        main.add_module('initial:{0}:relu'.format(ndf),
                         nn.LeakyReLU(0.2, inplace=True))
         csize, cndf = isize / 2, ndf
 
         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cndf),
                             nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
-            main.add_module('extra-layers-{0}.{1}.batchnorm'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:batchnorm'.format(t, cndf),
                             nn.BatchNorm2d(cndf))
-            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cndf),
                             nn.LeakyReLU(0.2, inplace=True))
 
         while csize > 4:
             in_feat = cndf
             out_feat = cndf * 2
-            main.add_module('pyramid.{0}-{1}.conv'.format(in_feat, out_feat),
+            main.add_module('pyramid:{0}-{1}:conv'.format(in_feat, out_feat),
                             nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
-            main.add_module('pyramid.{0}.batchnorm'.format(out_feat),
+            main.add_module('pyramid:{0}:batchnorm'.format(out_feat),
                             nn.BatchNorm2d(out_feat))
-            main.add_module('pyramid.{0}.relu'.format(out_feat),
+            main.add_module('pyramid:{0}:relu'.format(out_feat),
                             nn.LeakyReLU(0.2, inplace=True))
             cndf = cndf * 2
             csize = csize / 2
 
         # state size. K x 4 x 4
-        main.add_module('final.{0}-{1}.conv'.format(cndf, 1),
+        main.add_module('final:{0}-{1}:conv'.format(cndf, 1),
                         nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
         self.main = main
@@ -65,36 +65,36 @@ class DCGAN_G(nn.Module):
 
         main = nn.Sequential()
         # input is Z, going into a convolution
-        main.add_module('initial.{0}-{1}.convt'.format(nz, cngf),
+        main.add_module('initial:{0}-{1}:convt'.format(nz, cngf),
                         nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
-        main.add_module('initial.{0}.batchnorm'.format(cngf),
+        main.add_module('initial:{0}:batchnorm'.format(cngf),
                         nn.BatchNorm2d(cngf))
-        main.add_module('initial.{0}.relu'.format(cngf),
+        main.add_module('initial:{0}:relu'.format(cngf),
                         nn.ReLU(True))
 
         csize, cndf = 4, cngf
         while csize < isize//2:
-            main.add_module('pyramid.{0}-{1}.convt'.format(cngf, cngf//2),
+            main.add_module('pyramid:{0}-{1}:convt'.format(cngf, cngf//2),
                             nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1, bias=False))
-            main.add_module('pyramid.{0}.batchnorm'.format(cngf//2),
+            main.add_module('pyramid:{0}:batchnorm'.format(cngf//2),
                             nn.BatchNorm2d(cngf//2))
-            main.add_module('pyramid.{0}.relu'.format(cngf//2),
+            main.add_module('pyramid:{0}:relu'.format(cngf//2),
                             nn.ReLU(True))
             cngf = cngf // 2
             csize = csize * 2
 
         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cngf),
                             nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
-            main.add_module('extra-layers-{0}.{1}.batchnorm'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:batchnorm'.format(t, cngf),
                             nn.BatchNorm2d(cngf))
-            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cngf),
                             nn.ReLU(True))
 
-        main.add_module('final.{0}-{1}.convt'.format(cngf, nc),
+        main.add_module('final:{0}-{1}:convt'.format(cngf, nc),
                         nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
-        main.add_module('final.{0}.tanh'.format(nc),
+        main.add_module('final:{0}:tanh'.format(nc),
                         nn.Tanh())
         self.main = main
@@ -114,31 +114,31 @@ class DCGAN_D_nobn(nn.Module):
 
         main = nn.Sequential()
         # input is nc x isize x isize
         # input is nc x isize x isize
-        main.add_module('initial.conv.{0}-{1}'.format(nc, ndf),
+        main.add_module('initial:{0}-{1}:conv'.format(nc, ndf),
                         nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
-        main.add_module('initial.relu.{0}'.format(ndf),
+        main.add_module('initial:{0}:relu'.format(ndf),
                         nn.LeakyReLU(0.2, inplace=True))
         csize, cndf = isize / 2, ndf
 
         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cndf),
                             nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
-            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cndf),
+            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cndf),
                             nn.LeakyReLU(0.2, inplace=True))
 
         while csize > 4:
             in_feat = cndf
             out_feat = cndf * 2
-            main.add_module('pyramid.{0}-{1}.conv'.format(in_feat, out_feat),
+            main.add_module('pyramid:{0}-{1}:conv'.format(in_feat, out_feat),
                             nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
-            main.add_module('pyramid.{0}.relu'.format(out_feat),
+            main.add_module('pyramid:{0}:relu'.format(out_feat),
                             nn.LeakyReLU(0.2, inplace=True))
             cndf = cndf * 2
             csize = csize / 2
 
         # state size. K x 4 x 4
-        main.add_module('final.{0}-{1}.conv'.format(cndf, 1),
+        main.add_module('final:{0}-{1}:conv'.format(cndf, 1),
                         nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
         self.main = main
@@ -164,30 +164,30 @@ class DCGAN_G_nobn(nn.Module):
             tisize = tisize * 2
 
         main = nn.Sequential()
-        main.add_module('initial.{0}-{1}.convt'.format(nz, cngf),
+        main.add_module('initial:{0}-{1}:convt'.format(nz, cngf),
                         nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
-        main.add_module('initial.{0}.relu'.format(cngf),
+        main.add_module('initial:{0}:relu'.format(cngf),
                         nn.ReLU(True))
 
         csize, cndf = 4, cngf
         while csize < isize//2:
-            main.add_module('pyramid.{0}-{1}.convt'.format(cngf, cngf//2),
+            main.add_module('pyramid:{0}-{1}:convt'.format(cngf, cngf//2),
                             nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1, bias=False))
-            main.add_module('pyramid.{0}.relu'.format(cngf//2),
+            main.add_module('pyramid:{0}:relu'.format(cngf//2),
                             nn.ReLU(True))
             cngf = cngf // 2
             csize = csize * 2
 
         # Extra layers
         for t in range(n_extra_layers):
-            main.add_module('extra-layers-{0}.{1}.conv'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cngf),
                             nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
-            main.add_module('extra-layers-{0}.{1}.relu'.format(t, cngf),
+            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cngf),
                             nn.ReLU(True))
 
-        main.add_module('final.{0}-{1}.convt'.format(cngf, nc),
+        main.add_module('final:{0}-{1}:convt'.format(cngf, nc),
                         nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
-        main.add_module('final.{0}.tanh'.format(nc),
+        main.add_module('final:{0}:tanh'.format(nc),
                         nn.Tanh())
         self.main = main
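
Note, not part of the patch: the switch from '.' to ':' in add_module names is presumably motivated by the fact that nn.Module uses '.' as the level separator in parameter and state_dict keys, and recent PyTorch versions reject module names containing a dot outright. A minimal sketch of that behavior under this assumption (the channel counts 3 and 64 are arbitrary example values, not taken from the patch):

    import torch.nn as nn

    main = nn.Sequential()

    # Old-style name: recent PyTorch raises
    # KeyError: module name can't contain "."
    try:
        main.add_module('initial.conv.3-64',
                        nn.Conv2d(3, 64, 4, 2, 1, bias=False))
    except KeyError as err:
        print(err)

    # New-style name: ':' is not a reserved character, so this is accepted.
    main.add_module('initial:3-64:conv',
                    nn.Conv2d(3, 64, 4, 2, 1, bias=False))

    # '.' separates module levels in state_dict keys, which is why a dot
    # inside a single module name would be ambiguous; the key printed here
    # is 'initial:3-64:conv.weight'.
    print(list(main.state_dict().keys()))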