Skip to content

Commit

Permalink
Merge pull request #1 from an0nym0u5-hackerese/main
Browse files Browse the repository at this point in the history
finished some modification
  • Loading branch information
fei-aiart committed Feb 20, 2021
2 parents ed4593a + 84c755f commit 52f84d1
Show file tree
Hide file tree
Showing 5 changed files with 17 additions and 19 deletions.
6 changes: 3 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ Citation:
* download pretrain model and put them in **./checkpoints** for test
* for model with stroke_loss, you should download [stroke_model](https://drive.google.com/file/d/16gSERA3TbPVFyCvKGtNKtJrQaOsG8vmO/view?usp=sharing) and specify model root in
**./models/pix2pixHD_model** for **net_c.load_state_dict**
* you can modify **options/base_option** to specify **--dataroot**, then run train.py or test.py
* you can modify **options/base_option** to specify **--dataroot**, then run train.py or test.py


## Dataset
Expand All @@ -52,11 +52,11 @@ Citation:

- charcoal_style from Google Drive
- [sRender w/o Lstr](https://drive.google.com/file/d/1mwGiFpXfMlcUw-ksfsyVhUSKQWQJGC6p/view?usp=sharing)
- [sRender](https://drive.google.com/file/d/1_A1rrbDILin6Cby1mD1SxIFAGRrUL4xV/view?usp=sharing)
- [sRender](https://drive.google.com/file/d/1jwxvZZAJ-0gr_XJX3i3rBNQYqh8FCRCI/view?usp=sharing)
- croquis_style from Google Drive
- [sRenderPix2Pix](https://drive.google.com/file/d/1GIRcc8q-plIXKxSDEug4UMXacB35w0G5/view?usp=sharing)
- [sRender w/o Lstr](https://drive.google.com/file/d/1JdVhJDVCcFQ1jtNfNy-Q05UL4IVqkqw3/view?usp=sharing)
- [sRender](https://drive.google.com/file/d/1PdSWrr4W60daA2xrH63kr8925gP_vtdA/view?usp=sharing)
- [sRender](https://drive.google.com/file/d/1AKyX1u7RieCwP8b8WEUOzkXufLgRotr0/view?usp=sharing)

## Results

Expand Down
13 changes: 6 additions & 7 deletions charcoal_style/stroke/models/pix2pixHD_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,10 @@ def name(self):
return 'Pix2PixHDModel'

def init_loss_filter(self, use_gan_feat_loss, use_vgg_loss, use_loss_style):
flags = (True, use_gan_feat_loss, use_vgg_loss, use_loss_style, True, True, True, True)
flags = (True, use_gan_feat_loss, use_vgg_loss, use_loss_style, True, True, True)

def loss_filter(g_gan, g_gan_feat, g_vgg, g_style, d_real, d_fake, l1, lossc):
return [l for (l, f) in zip((g_gan, g_gan_feat, g_vgg, g_style, d_real, d_fake, l1, lossc), flags) if f]
def loss_filter(g_gan, g_gan_feat, g_vgg, g_style, d_real, d_fake, lossc):
return [l for (l, f) in zip((g_gan, g_gan_feat, g_vgg, g_style, d_real, d_fake, lossc), flags) if f]

return loss_filter

Expand Down Expand Up @@ -69,7 +69,7 @@ def initialize(self, opt):
opt.n_downsample_E, norm=opt.norm, gpu_ids=self.gpu_ids)

self.net_C = DenseNet(growthRate=6, depth=10, reduction=0.5, bottleneck=True, nClasses=7)
self.net_C.load_state_dict(torch.load("/home/meimei/mayme/data/stroke7_result/checkpoints/net_C_ins180.pth", map_location='cuda:0'))
self.net_C.load_state_dict(torch.load("/data/meimei/code/storke_class7/checkpoints/net_C_ins180.pth", map_location='cuda:1'))
# self.net_C.load_state_dict(torch.load("/data/mayme/dataset/result/checkpoints/net_C_ins620.pth"))
self.net_C.cuda()

Expand Down Expand Up @@ -104,7 +104,7 @@ def initialize(self, opt):
self.criterionStyle = networks.StyleLoss(self.gpu_ids)

# Names so we can breakout loss
self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG', 'G_Style', 'D_real', 'D_fake', 'loss_L1', 'loss_C')
self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG', 'G_Style', 'D_real', 'D_fake', 'loss_C')

# initialize optimizers
# optimizer G
Expand Down Expand Up @@ -239,10 +239,9 @@ def forward(self, label, inst, image, feat, infer=False):
if not self.opt.no_style_loss:
loss_G_Style = self.criterionStyle(fake_image, real_image) * 1e5 ###style_loss

loss_L1 = self.criterionFeat(fake_image, real_image) * self.opt.lambda_feat
# Only return the fake_B image if necessary to save BW
return [
self.loss_filter(loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_G_Style, loss_D_real, loss_D_fake, loss_L1,loss_C),
self.loss_filter(loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_G_Style, loss_D_real, loss_D_fake, loss_C),
None if not infer else fake_image]

def inference(self, label, inst, image=None):
Expand Down
2 changes: 1 addition & 1 deletion charcoal_style/stroke/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ def lcm(a,b): return abs(a * b)/fractions.gcd(a,b) if a and b else 0

# calculate final loss scalar
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('loss_L1') +loss_dict['loss_C']
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0) +loss_dict['loss_C']


############### Backward Pass ####################
Expand Down
13 changes: 6 additions & 7 deletions croquis_style/stroke/models/pix2pixHD_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,10 @@ def name(self):
return 'Pix2PixHDModel'

def init_loss_filter(self, use_gan_feat_loss, use_vgg_loss, use_loss_style):
flags = (True, use_gan_feat_loss, use_vgg_loss, use_loss_style, True, True, True, True)
flags = (True, use_gan_feat_loss, use_vgg_loss, use_loss_style, True, True, True)

def loss_filter(g_gan, g_gan_feat, g_vgg, g_style, d_real, d_fake, l1, lossc):
return [l for (l, f) in zip((g_gan, g_gan_feat, g_vgg, g_style, d_real, d_fake, l1, lossc), flags) if f]
def loss_filter(g_gan, g_gan_feat, g_vgg, g_style, d_real, d_fake, lossc):
return [l for (l, f) in zip((g_gan, g_gan_feat, g_vgg, g_style, d_real, d_fake, lossc), flags) if f]

return loss_filter

Expand Down Expand Up @@ -69,7 +69,7 @@ def initialize(self, opt):
opt.n_downsample_E, norm=opt.norm, gpu_ids=self.gpu_ids)

self.net_C = DenseNet(growthRate=6, depth=10, reduction=0.5, bottleneck=True, nClasses=7)
self.net_C.load_state_dict(torch.load("/home/meimei/mayme/data/stroke7_result/checkpoints/net_C_ins180.pth", map_location='cuda:0'))
self.net_C.load_state_dict(torch.load("/data/meimei/code/storke_class7/checkpoints/net_C_ins180.pth", map_location='cuda:1'))
# self.net_C.load_state_dict(torch.load("/data/mayme/dataset/result/checkpoints/net_C_ins620.pth"))
self.net_C.cuda()

Expand Down Expand Up @@ -104,7 +104,7 @@ def initialize(self, opt):
self.criterionStyle = networks.StyleLoss(self.gpu_ids)

# Names so we can breakout loss
self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG', 'G_Style', 'D_real', 'D_fake', 'loss_L1', 'loss_C')
self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG', 'G_Style', 'D_real', 'D_fake', 'loss_C')

# initialize optimizers
# optimizer G
Expand Down Expand Up @@ -239,10 +239,9 @@ def forward(self, label, inst, image, feat, infer=False):
if not self.opt.no_style_loss:
loss_G_Style = self.criterionStyle(fake_image, real_image) * 1e5 ###style_loss

loss_L1 = self.criterionFeat(fake_image, real_image) * self.opt.lambda_feat
# Only return the fake_B image if necessary to save BW
return [
self.loss_filter(loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_G_Style, loss_D_real, loss_D_fake, loss_L1,loss_C),
self.loss_filter(loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_G_Style, loss_D_real, loss_D_fake, loss_C),
None if not infer else fake_image]

def inference(self, label, inst, image=None):
Expand Down
2 changes: 1 addition & 1 deletion croquis_style/stroke/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ def lcm(a,b): return abs(a * b)/fractions.gcd(a,b) if a and b else 0

# calculate final loss scalar
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('loss_L1') +loss_dict['loss_C']
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0) +loss_dict['loss_C']


############### Backward Pass ####################
Expand Down

0 comments on commit 52f84d1

Please sign in to comment.