Merge pull request #570 from graphemecluster/master
Fix demo error with custom video
AliaksandrSiarohin authored Jun 5, 2023
2 parents 96127d6 + 480613e · commit 9d5f51d
Showing 3 changed files with 22 additions and 12 deletions.
README.md: 7 changes (5 additions, 2 deletions)
@@ -77,9 +77,12 @@ docker run -it --rm --gpus all \
 ```

 ### Colab Demo
-@graphemecluster prepared a gui-demo for the google-colab see: ```demo.ipynb```. To run press ```Open In Colab``` button.

-For old demo, see ```old-demo.ipynb```.
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AliaksandrSiarohin/first-order-model/blob/master/demo.ipynb) [![Open in Kaggle](https://kaggle.com/static/images/open-in-kaggle.svg)](https://kaggle.com/kernels/welcome?src=https://github.com/AliaksandrSiarohin/first-order-model/blob/master/demo.ipynb)
+
+@graphemecluster prepared a GUI demo for the Google Colab. It also works in Kaggle. For the source code, see [```demo.ipynb```](https://github.com/AliaksandrSiarohin/first-order-model/blob/master/demo.ipynb).
+
+For the old demo, see [```old_demo.ipynb```](https://github.com/AliaksandrSiarohin/first-order-model/blob/master/old_demo.ipynb).

 ### Face-swap
 It is possible to modify the method to perform face-swap using supervised segmentation masks.
demo.ipynb: 14 changes (9 additions, 5 deletions)
@@ -446,14 +446,14 @@
"\t\tcontent = file_info['content']\n",
"\tif content is not None:\n",
"\t\tselected_video = 'user/' + name\n",
"\t\tpreview = resize(PIL.Image.fromarray(thumbnail(content)).convert(\"RGB\"))\n",
"\t\twith open(selected_video, 'wb') as video:\n",
"\t\t\tvideo.write(content)\n",
"\t\tpreview = resize(PIL.Image.fromarray(thumbnail(selected_video)).convert(\"RGB\"))\n",
"\t\tinput_video_widget.clear_output(wait=True)\n",
"\t\twith input_video_widget:\n",
"\t\t\tdisplay(preview)\n",
"\t\tinput_video_widget.add_class('uploaded')\n",
"\t\tdisplay(Javascript('deselectVideos()'))\n",
"\t\twith open(selected_video, 'wb') as video:\n",
"\t\t\tvideo.write(content)\n",
"upload_input_video_button.observe(upload_video, names='value')\n",
"\n",
"def change_model(change):\n",
@@ -491,16 +491,20 @@
"\t\t)\n",
"\tprogress_bar.clear_output()\n",
"\timageio.mimsave('output.mp4', [img_as_ubyte(frame) for frame in predictions], fps=fps)\n",
"\tif selected_video.startswith('user/') or selected_video == 'demo/videos/0.mp4':\n",
"\ttry:\n",
"\t\twith NamedTemporaryFile(suffix='.mp4') as output:\n",
"\t\t\tffmpeg.output(ffmpeg.input('output.mp4').video, ffmpeg.input(selected_video).audio, output.name, c='copy').overwrite_output().run()\n",
"\t\t\tffmpeg.output(ffmpeg.input('output.mp4').video, ffmpeg.input(selected_video).audio, output.name, c='copy').run()\n",
"\t\t\twith open('output.mp4', 'wb') as result:\n",
"\t\t\t\tcopyfileobj(output, result)\n",
"\texcept ffmpeg.Error:\n",
"\t\tpass\n",
"\toutput_widget.clear_output(True)\n",
"\twith output_widget:\n",
"\t\tvideo_widget = ipywidgets.Video.from_file('output.mp4', autoplay=False, loop=False)\n",
"\t\tvideo_widget.add_class('video')\n",
"\t\tvideo_widget.add_class('video-left')\n",
"\t\tdisplay(video_widget)\n",
"\tcomparison_widget.clear_output(True)\n",
"\twith comparison_widget:\n",
"\t\tvideo_widget = ipywidgets.Video.from_file(selected_video, autoplay=False, loop=False, controls=False)\n",
"\t\tvideo_widget.add_class('video')\n",
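In the demo.ipynb hunks above, the new upload handler writes the uploaded bytes to `user/<name>` before building the preview, so the thumbnail is generated from the saved file instead of the raw upload buffer, and the audio mux is wrapped in `try`/`except ffmpeg.Error` so a custom video with no audio track no longer aborts generation. Below is a minimal standalone sketch of the save-then-preview ordering; the function name, the imageio/PIL-based preview, and the 256-pixel thumbnail size are illustrative stand-ins for the notebook's own `thumbnail()` and `resize()` helpers, not the notebook's code.

```python
# Hypothetical helper illustrating the save-first ordering from the diff above;
# assumes imageio can decode the container (e.g. via the imageio-ffmpeg plugin).
import os

import imageio
import PIL.Image


def save_upload_and_preview(name, content, user_dir='user', size=(256, 256)):
    """Persist the uploaded bytes, then build a preview from the saved file."""
    os.makedirs(user_dir, exist_ok=True)
    selected_video = os.path.join(user_dir, name)

    # Write the upload to disk first; everything after this reads the file path.
    with open(selected_video, 'wb') as video:
        video.write(content)

    # Decode the first frame from the saved file, not from the raw bytes.
    reader = imageio.get_reader(selected_video)
    first_frame = reader.get_data(0)
    reader.close()

    preview = PIL.Image.fromarray(first_frame).convert('RGB')
    preview.thumbnail(size)  # shrink in place for display
    return selected_video, preview
```

With this ordering the decoder works from a complete file on disk, which is the same path the later generation and audio-mux steps read.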
demo.py: 13 changes (8 additions, 5 deletions)
@@ -148,7 +148,7 @@ def normalize_kp(kp):

     if opt.find_best_frame or opt.best_frame is not None:
         i = opt.best_frame if opt.best_frame is not None else find_best_frame(source_image, driving_video, cpu=opt.cpu)
-        print ("Best frame: " + str(i))
+        print("Best frame: " + str(i))
         driving_forward = driving_video[i:]
         driving_backward = driving_video[:(i+1)][::-1]
         predictions_forward = make_animation(source_image, driving_forward, generator, kp_detector, relative=opt.relative, adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
@@ -159,7 +159,10 @@ def normalize_kp(kp):
     imageio.mimsave(opt.result_video, [img_as_ubyte(frame) for frame in predictions], fps=fps)

     if opt.audio:
-        with NamedTemporaryFile(suffix='.' + splitext(opt.result_video)[1]) as output:
-            ffmpeg.output(ffmpeg.input(opt.result_video).video, ffmpeg.input(opt.driving_video).audio, output.name, c='copy').run()
-            with open(opt.result_video, 'wb') as result:
-                copyfileobj(output, result)
+        try:
+            with NamedTemporaryFile(suffix=splitext(opt.result_video)[1]) as output:
+                ffmpeg.output(ffmpeg.input(opt.result_video).video, ffmpeg.input(opt.driving_video).audio, output.name, c='copy').run()
+                with open(opt.result_video, 'wb') as result:
+                    copyfileobj(output, result)
+        except ffmpeg.Error:
+            print("Failed to copy audio: the driving video may have no audio track or the audio format is invalid.")
