import torch
import torch.nn as nn
import torch.nn.parallel


class DCGAN_D(nn.Module):
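    """DCGAN-style convolutional discriminator.

    Strided convolutions halve an nc x isize x isize input down to a 4 x 4
    feature map, a final convolution reduces it to a single score, and
    forward() returns that score averaged over the batch as a 1-element
    tensor.
    """
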
    def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
        super(DCGAN_D, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        main = nn.Sequential()
        # input is nc x isize x isize
        main.add_module('initial:{0}-{1}:conv'.format(nc, ndf),
                        nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
        main.add_module('initial:{0}:relu'.format(ndf),
                        nn.LeakyReLU(0.2, inplace=True))
        csize, cndf = isize // 2, ndf

        # Extra layers
        for t in range(n_extra_layers):
            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cndf),
                            nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
            main.add_module('extra-layers-{0}:{1}:batchnorm'.format(t, cndf),
                            nn.BatchNorm2d(cndf))
            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cndf),
                            nn.LeakyReLU(0.2, inplace=True))
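
        # Pyramid of strided convolutions: each step halves the spatial size
        # and doubles the number of feature maps until the map is 4 x 4.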
        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add_module('pyramid:{0}-{1}:conv'.format(in_feat, out_feat),
                            nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
            main.add_module('pyramid:{0}:batchnorm'.format(out_feat),
                            nn.BatchNorm2d(out_feat))
            main.add_module('pyramid:{0}:relu'.format(out_feat),
                            nn.LeakyReLU(0.2, inplace=True))
            cndf = cndf * 2
            csize = csize // 2

        # state size. K x 4 x 4
        main.add_module('final:{0}-{1}:conv'.format(cndf, 1),
                        nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
        self.main = main

    def forward(self, input):
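        # Run the layers with nn.parallel.data_parallel when the input is a CUDA
        # tensor and more than one GPU was requested; otherwise run them directly.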
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)

        # Average the per-sample scores over the batch and return a 1-element tensor.
        output = output.mean(0)
        return output.view(1)


class DCGAN_G(nn.Module):
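    """DCGAN-style convolutional generator.

    A transposed convolution expands the nz x 1 x 1 noise input to a 4 x 4
    map, repeated fractionally-strided convolutions double the spatial size,
    and the final layer emits an nc x isize x isize image squashed to
    [-1, 1] by Tanh.
    """
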
    def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
        super(DCGAN_G, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"
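
        # Choose the width of the coarsest 4 x 4 layer so that doubling the
        # spatial size (while halving the width) at each upsampling step lands
        # exactly on isize x isize.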
        cngf, tisize = ngf//2, 4
        while tisize != isize:
            cngf = cngf * 2
            tisize = tisize * 2

        main = nn.Sequential()
        # input is Z, going into a convolution
        main.add_module('initial:{0}-{1}:convt'.format(nz, cngf),
                        nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
        main.add_module('initial:{0}:batchnorm'.format(cngf),
                        nn.BatchNorm2d(cngf))
        main.add_module('initial:{0}:relu'.format(cngf),
                        nn.ReLU(True))

        csize, cndf = 4, cngf
        while csize < isize//2:
            main.add_module('pyramid:{0}-{1}:convt'.format(cngf, cngf//2),
                            nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1, bias=False))
            main.add_module('pyramid:{0}:batchnorm'.format(cngf//2),
                            nn.BatchNorm2d(cngf//2))
            main.add_module('pyramid:{0}:relu'.format(cngf//2),
                            nn.ReLU(True))
            cngf = cngf // 2
            csize = csize * 2

        # Extra layers
        for t in range(n_extra_layers):
            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cngf),
                            nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
            main.add_module('extra-layers-{0}:{1}:batchnorm'.format(t, cngf),
                            nn.BatchNorm2d(cngf))
            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cngf),
                            nn.ReLU(True))

        main.add_module('final:{0}-{1}:convt'.format(cngf, nc),
                        nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
        main.add_module('final:{0}:tanh'.format(nc),
                        nn.Tanh())
        self.main = main

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output


###############################################################################
class DCGAN_D_nobn(nn.Module):
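    """Same architecture as DCGAN_D, but with every BatchNorm2d layer omitted."""
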
    def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
        super(DCGAN_D_nobn, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        main = nn.Sequential()
        # input is nc x isize x isize
        main.add_module('initial:{0}-{1}:conv'.format(nc, ndf),
                        nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
        main.add_module('initial:{0}:relu'.format(ndf),
                        nn.LeakyReLU(0.2, inplace=True))
        csize, cndf = isize // 2, ndf

        # Extra layers
        for t in range(n_extra_layers):
            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cndf),
                            nn.Conv2d(cndf, cndf, 3, 1, 1, bias=False))
            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cndf),
                            nn.LeakyReLU(0.2, inplace=True))

        while csize > 4:
            in_feat = cndf
            out_feat = cndf * 2
            main.add_module('pyramid:{0}-{1}:conv'.format(in_feat, out_feat),
                            nn.Conv2d(in_feat, out_feat, 4, 2, 1, bias=False))
            main.add_module('pyramid:{0}:relu'.format(out_feat),
                            nn.LeakyReLU(0.2, inplace=True))
            cndf = cndf * 2
            csize = csize // 2

        # state size. K x 4 x 4
        main.add_module('final:{0}-{1}:conv'.format(cndf, 1),
                        nn.Conv2d(cndf, 1, 4, 1, 0, bias=False))
        self.main = main

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)

        output = output.mean(0)
        return output.view(1)


class DCGAN_G_nobn(nn.Module):
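    """Same architecture as DCGAN_G, but with every BatchNorm2d layer omitted."""
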
    def __init__(self, isize, nz, nc, ngf, ngpu, n_extra_layers=0):
        super(DCGAN_G_nobn, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, "isize has to be a multiple of 16"

        cngf, tisize = ngf//2, 4
        while tisize != isize:
            cngf = cngf * 2
            tisize = tisize * 2

        main = nn.Sequential()
        main.add_module('initial:{0}-{1}:convt'.format(nz, cngf),
                        nn.ConvTranspose2d(nz, cngf, 4, 1, 0, bias=False))
        main.add_module('initial:{0}:relu'.format(cngf),
                        nn.ReLU(True))

        csize, cndf = 4, cngf
        while csize < isize//2:
            main.add_module('pyramid:{0}-{1}:convt'.format(cngf, cngf//2),
                            nn.ConvTranspose2d(cngf, cngf//2, 4, 2, 1, bias=False))
            main.add_module('pyramid:{0}:relu'.format(cngf//2),
                            nn.ReLU(True))
            cngf = cngf // 2
            csize = csize * 2

        # Extra layers
        for t in range(n_extra_layers):
            main.add_module('extra-layers-{0}:{1}:conv'.format(t, cngf),
                            nn.Conv2d(cngf, cngf, 3, 1, 1, bias=False))
            main.add_module('extra-layers-{0}:{1}:relu'.format(t, cngf),
                            nn.ReLU(True))

        main.add_module('final:{0}-{1}:convt'.format(cngf, nc),
                        nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
        main.add_module('final:{0}:tanh'.format(nc),
                        nn.Tanh())
        self.main = main

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output
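

# ---------------------------------------------------------------------------
# Minimal smoke-test sketch (not part of the original module). It instantiates
# a generator/discriminator pair with assumed hyperparameters (isize=64,
# nz=100, nc=3, ngf=ndf=64, single device) and pushes a random batch through
# both networks; the names netG and netD are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    netG = DCGAN_G(isize=64, nz=100, nc=3, ngf=64, ngpu=1)
    netD = DCGAN_D(isize=64, nz=100, nc=3, ndf=64, ngpu=1)

    noise = torch.randn(8, 100, 1, 1)   # batch of 8 latent vectors
    fake = netG(noise)                  # -> 8 x 3 x 64 x 64 images in [-1, 1]
    score = netD(fake)                  # -> 1-element tensor (batch-averaged score)
    print(fake.size(), score.size())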