Merge pull request #69 from kopytjuk/bugfix-windows

Bugfix to run the code on Windows machines.

Commit 72853533a0

main.py: 144 lines changed
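Why the guard is needed (background context, not part of the original patch text): Windows has no fork(), so `torch.utils.data.DataLoader` with `num_workers > 0` spawns fresh Python processes that re-import the main script. Without an `if __name__ == "__main__":` guard, the module-level training code re-executes inside every worker and the run typically dies with a RuntimeError about starting a process before the bootstrapping phase finished. A minimal sketch of the pattern this patch applies (the module and names below are illustrative, not from this repository):

```python
# Minimal sketch of the Windows issue this PR fixes (illustrative names).
# Windows multiprocessing uses "spawn": each DataLoader worker re-imports
# this module, so any unguarded top-level code would run once per worker.
import torch
from torch.utils.data import DataLoader, TensorDataset

def make_loader():
    data = TensorDataset(torch.randn(64, 3, 32, 32))
    # num_workers > 0 starts subprocesses that re-import this file.
    return DataLoader(data, batch_size=16, shuffle=True, num_workers=2)

if __name__ == "__main__":
    # Guarded entry point: only the parent process runs the training code.
    for (batch,) in make_loader():
        print(batch.shape)
```

The merged change below does exactly this for main.py: it wraps the whole script body in the guard and re-indents it.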
@@ -16,51 +16,53 @@ import os
 import models.dcgan as dcgan
 import models.mlp as mlp
 
-parser = argparse.ArgumentParser()
-parser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw ')
-parser.add_argument('--dataroot', required=True, help='path to dataset')
-parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
-parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
-parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')
-parser.add_argument('--nc', type=int, default=3, help='input image channels')
-parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
-parser.add_argument('--ngf', type=int, default=64)
-parser.add_argument('--ndf', type=int, default=64)
-parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
-parser.add_argument('--lrD', type=float, default=0.00005, help='learning rate for Critic, default=0.00005')
-parser.add_argument('--lrG', type=float, default=0.00005, help='learning rate for Generator, default=0.00005')
-parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
-parser.add_argument('--cuda' , action='store_true', help='enables cuda')
-parser.add_argument('--ngpu' , type=int, default=1, help='number of GPUs to use')
-parser.add_argument('--netG', default='', help="path to netG (to continue training)")
-parser.add_argument('--netD', default='', help="path to netD (to continue training)")
-parser.add_argument('--clamp_lower', type=float, default=-0.01)
-parser.add_argument('--clamp_upper', type=float, default=0.01)
-parser.add_argument('--Diters', type=int, default=5, help='number of D iters per each G iter')
-parser.add_argument('--noBN', action='store_true', help='use batchnorm or not (only for DCGAN)')
-parser.add_argument('--mlp_G', action='store_true', help='use MLP for G')
-parser.add_argument('--mlp_D', action='store_true', help='use MLP for D')
-parser.add_argument('--n_extra_layers', type=int, default=0, help='Number of extra layers on gen and disc')
-parser.add_argument('--experiment', default=None, help='Where to store samples and models')
-parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
-opt = parser.parse_args()
-print(opt)
+if __name__=="__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--dataset', required=True, help='cifar10 | lsun | imagenet | folder | lfw ')
+    parser.add_argument('--dataroot', required=True, help='path to dataset')
+    parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
+    parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
+    parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')
+    parser.add_argument('--nc', type=int, default=3, help='input image channels')
+    parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
+    parser.add_argument('--ngf', type=int, default=64)
+    parser.add_argument('--ndf', type=int, default=64)
+    parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
+    parser.add_argument('--lrD', type=float, default=0.00005, help='learning rate for Critic, default=0.00005')
+    parser.add_argument('--lrG', type=float, default=0.00005, help='learning rate for Generator, default=0.00005')
+    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
+    parser.add_argument('--cuda' , action='store_true', help='enables cuda')
+    parser.add_argument('--ngpu' , type=int, default=1, help='number of GPUs to use')
+    parser.add_argument('--netG', default='', help="path to netG (to continue training)")
+    parser.add_argument('--netD', default='', help="path to netD (to continue training)")
+    parser.add_argument('--clamp_lower', type=float, default=-0.01)
+    parser.add_argument('--clamp_upper', type=float, default=0.01)
+    parser.add_argument('--Diters', type=int, default=5, help='number of D iters per each G iter')
+    parser.add_argument('--noBN', action='store_true', help='use batchnorm or not (only for DCGAN)')
+    parser.add_argument('--mlp_G', action='store_true', help='use MLP for G')
+    parser.add_argument('--mlp_D', action='store_true', help='use MLP for D')
+    parser.add_argument('--n_extra_layers', type=int, default=0, help='Number of extra layers on gen and disc')
+    parser.add_argument('--experiment', default=None, help='Where to store samples and models')
+    parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
+    opt = parser.parse_args()
+    print(opt)
+
 
-if opt.experiment is None:
-    opt.experiment = 'samples'
-os.system('mkdir {0}'.format(opt.experiment))
+    if opt.experiment is None:
+        opt.experiment = 'samples'
+    os.system('mkdir {0}'.format(opt.experiment))
 
-opt.manualSeed = random.randint(1, 10000) # fix seed
-print("Random Seed: ", opt.manualSeed)
-random.seed(opt.manualSeed)
-torch.manual_seed(opt.manualSeed)
+    opt.manualSeed = random.randint(1, 10000) # fix seed
+    print("Random Seed: ", opt.manualSeed)
+    random.seed(opt.manualSeed)
+    torch.manual_seed(opt.manualSeed)
 
-cudnn.benchmark = True
+    cudnn.benchmark = True
 
-if torch.cuda.is_available() and not opt.cuda:
-    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
+    if torch.cuda.is_available() and not opt.cuda:
+        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
 
-if opt.dataset in ['imagenet', 'folder', 'lfw']:
-    # folder dataset
-    dataset = dset.ImageFolder(root=opt.dataroot,
-                               transform=transforms.Compose([
+    if opt.dataset in ['imagenet', 'folder', 'lfw']:
+        # folder dataset
+        dataset = dset.ImageFolder(root=opt.dataroot,
+                                   transform=transforms.Compose([
@@ -69,7 +71,7 @@ if opt.dataset in ['imagenet', 'folder', 'lfw']:
-                                   transforms.ToTensor(),
-                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
-                               ]))
-elif opt.dataset == 'lsun':
-    dataset = dset.LSUN(db_path=opt.dataroot, classes=['bedroom_train'],
-                        transform=transforms.Compose([
-                            transforms.Scale(opt.imageSize),
+                                       transforms.ToTensor(),
+                                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+                                   ]))
+    elif opt.dataset == 'lsun':
+        dataset = dset.LSUN(db_path=opt.dataroot, classes=['bedroom_train'],
+                            transform=transforms.Compose([
+                                transforms.Scale(opt.imageSize),
@@ -77,7 +79,7 @@ elif opt.dataset == 'lsun':
-                            transforms.ToTensor(),
-                            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
-                        ]))
-elif opt.dataset == 'cifar10':
-    dataset = dset.CIFAR10(root=opt.dataroot, download=True,
-                           transform=transforms.Compose([
-                               transforms.Scale(opt.imageSize),
+                                transforms.ToTensor(),
+                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+                            ]))
+    elif opt.dataset == 'cifar10':
+        dataset = dset.CIFAR10(root=opt.dataroot, download=True,
+                               transform=transforms.Compose([
+                                   transforms.Scale(opt.imageSize),
@@ -85,19 +87,19 @@ elif opt.dataset == 'cifar10':
-                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
-                           ])
-    )
-assert dataset
-dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
-                                         shuffle=True, num_workers=int(opt.workers))
+                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+                               ])
+        )
+    assert dataset
+    dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
+                                             shuffle=True, num_workers=int(opt.workers))
 
-ngpu = int(opt.ngpu)
-nz = int(opt.nz)
-ngf = int(opt.ngf)
-ndf = int(opt.ndf)
-nc = int(opt.nc)
-n_extra_layers = int(opt.n_extra_layers)
+    ngpu = int(opt.ngpu)
+    nz = int(opt.nz)
+    ngf = int(opt.ngf)
+    ndf = int(opt.ndf)
+    nc = int(opt.nc)
+    n_extra_layers = int(opt.n_extra_layers)
 
-# custom weights initialization called on netG and netD
-def weights_init(m):
-    classname = m.__class__.__name__
-    if classname.find('Conv') != -1:
-        m.weight.data.normal_(0.0, 0.02)
+    # custom weights initialization called on netG and netD
+    def weights_init(m):
+        classname = m.__class__.__name__
+        if classname.find('Conv') != -1:
+            m.weight.data.normal_(0.0, 0.02)
@@ -105,51 +107,51 @@ def weights_init(m):
-        m.weight.data.normal_(1.0, 0.02)
-        m.bias.data.fill_(0)
+            m.weight.data.normal_(1.0, 0.02)
+            m.bias.data.fill_(0)
 
-if opt.noBN:
-    netG = dcgan.DCGAN_G_nobn(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)
-elif opt.mlp_G:
-    netG = mlp.MLP_G(opt.imageSize, nz, nc, ngf, ngpu)
-else:
-    netG = dcgan.DCGAN_G(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)
+    if opt.noBN:
+        netG = dcgan.DCGAN_G_nobn(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)
+    elif opt.mlp_G:
+        netG = mlp.MLP_G(opt.imageSize, nz, nc, ngf, ngpu)
+    else:
+        netG = dcgan.DCGAN_G(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)
 
-netG.apply(weights_init)
-if opt.netG != '': # load checkpoint if needed
-    netG.load_state_dict(torch.load(opt.netG))
-print(netG)
+    netG.apply(weights_init)
+    if opt.netG != '': # load checkpoint if needed
+        netG.load_state_dict(torch.load(opt.netG))
+    print(netG)
 
-if opt.mlp_D:
-    netD = mlp.MLP_D(opt.imageSize, nz, nc, ndf, ngpu)
-else:
-    netD = dcgan.DCGAN_D(opt.imageSize, nz, nc, ndf, ngpu, n_extra_layers)
-netD.apply(weights_init)
+    if opt.mlp_D:
+        netD = mlp.MLP_D(opt.imageSize, nz, nc, ndf, ngpu)
+    else:
+        netD = dcgan.DCGAN_D(opt.imageSize, nz, nc, ndf, ngpu, n_extra_layers)
+    netD.apply(weights_init)
 
-if opt.netD != '':
-    netD.load_state_dict(torch.load(opt.netD))
-print(netD)
+    if opt.netD != '':
+        netD.load_state_dict(torch.load(opt.netD))
+    print(netD)
 
-input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
-noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
-fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
-one = torch.FloatTensor([1])
-mone = one * -1
+    input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
+    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
+    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
+    one = torch.FloatTensor([1])
+    mone = one * -1
 
-if opt.cuda:
-    netD.cuda()
-    netG.cuda()
-    input = input.cuda()
-    one, mone = one.cuda(), mone.cuda()
-    noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
+    if opt.cuda:
+        netD.cuda()
+        netG.cuda()
+        input = input.cuda()
+        one, mone = one.cuda(), mone.cuda()
+        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
 
-# setup optimizer
-if opt.adam:
-    optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.999))
-    optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.999))
-else:
-    optimizerD = optim.RMSprop(netD.parameters(), lr = opt.lrD)
-    optimizerG = optim.RMSprop(netG.parameters(), lr = opt.lrG)
+    # setup optimizer
+    if opt.adam:
+        optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.999))
+        optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.999))
+    else:
+        optimizerD = optim.RMSprop(netD.parameters(), lr = opt.lrD)
+        optimizerG = optim.RMSprop(netG.parameters(), lr = opt.lrG)
 
-gen_iterations = 0
-for epoch in range(opt.niter):
-    data_iter = iter(dataloader)
-    i = 0
-    while i < len(dataloader):
+    gen_iterations = 0
+    for epoch in range(opt.niter):
+        data_iter = iter(dataloader)
+        i = 0
+        while i < len(dataloader):
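With the guard merged, the script starts the same way on Windows as elsewhere, using the flags defined above, for example (the dataset path is illustrative):

```
python main.py --dataset cifar10 --dataroot ./data --cuda
```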