Create example_images/readme.md (#3)
- Create example_images/readme.md (c35ff033638f3fc0910053d2069e20c591f0f558)
- Upload 12 files (fde2e0d045a087ddc3b40f9d9b16cbad464dc26e)
- adding examples and caching (0ca0fe59817b31e28e821401619aef9cfb54fe53)
Co-authored-by: yuvraj sharma <[email protected]>
- .gitattributes +7 -0
- app.py +34 -8
- example_images/readme.md +1 -0
- example_images/sample1.png +0 -0
- example_images/sample10.png +3 -0
- example_images/sample11.jpg +0 -0
- example_images/sample12.jpg +0 -0
- example_images/sample13.png +3 -0
- example_images/sample2.png +3 -0
- example_images/sample4.png +3 -0
- example_images/sample5.png +3 -0
- example_images/sample6.png +3 -0
- example_images/sample7.png +3 -0
- example_images/sample8.jpg +0 -0
- example_images/sample9.jpg +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+example_images/sample10.png filter=lfs diff=lfs merge=lfs -text
+example_images/sample13.png filter=lfs diff=lfs merge=lfs -text
+example_images/sample2.png filter=lfs diff=lfs merge=lfs -text
+example_images/sample4.png filter=lfs diff=lfs merge=lfs -text
+example_images/sample5.png filter=lfs diff=lfs merge=lfs -text
+example_images/sample6.png filter=lfs diff=lfs merge=lfs -text
+example_images/sample7.png filter=lfs diff=lfs merge=lfs -text
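For context, LFS rules like the ones added above are normally generated with git lfs track rather than edited by hand. A minimal sketch, assuming the git-lfs extension is installed; the helper below is illustrative and not part of this repository:

import subprocess

# Patterns added to .gitattributes in this commit (PNG examples stored via Git LFS).
LFS_TRACKED = [
    "example_images/sample10.png",
    "example_images/sample13.png",
    "example_images/sample2.png",
    "example_images/sample4.png",
    "example_images/sample5.png",
    "example_images/sample6.png",
    "example_images/sample7.png",
]

def track_with_lfs(patterns):
    """Append LFS tracking rules to .gitattributes via `git lfs track`."""
    for pattern in patterns:
        # `git lfs track <pattern>` writes the filter/diff/merge attributes shown above.
        subprocess.run(["git", "lfs", "track", pattern], check=True)

if __name__ == "__main__":
    track_with_lfs(LFS_TRACKED)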
app.py
CHANGED
@@ -6,12 +6,16 @@ from modelscope.outputs import OutputKeys
 
 image_to_video_pipe = pipeline(task="image-to-video", model='damo/i2vgen-xl', model_revision='v1.1.4', device='cuda:0')
 
+# Get the current working directory
+current_directory = os.path.dirname(os.path.realpath(__file__))
+print("Current Working Directory:", current_directory)
+
 def upload_file(file):
     return file.name
 
 def image_to_video(image_in, text_in):
     if image_in is None:
-        raise gr.Error('请上传图片或等待图片上传完成')
+        raise gr.Error('Please upload the image or wait for the image upload to complete / 请上传图片或等待图片上传完成')
     print(image_in)
     output_video_path = image_to_video_pipe(image_in, caption=text_in)[OutputKeys.OUTPUT_VIDEO]
     print(output_video_path)
@@ -34,17 +38,39 @@ with gr.Blocks() as demo:
     )
     with gr.Row():
         with gr.Column():
-            text_in = gr.Textbox(label="
-            image_in = gr.Image(label="
+            text_in = gr.Textbox(label="Text description/文本描述", lines=2, elem_id="text-in")
+            image_in = gr.Image(label="Image Input/图片输入", type="filepath", interactive=False, elem_id="image-in", height=300)
             with gr.Row():
-                upload_image = gr.UploadButton("
-                image_submit = gr.Button("
+                upload_image = gr.UploadButton("Upload Image/上传图片", file_types=["image"], file_count="single")
+                image_submit = gr.Button("Generate video/生成视频🎬")
         with gr.Column():
-            video_out_1 = gr.Video(label='
-    gr.Markdown("<left><font size=2>注:如果生成的视频无法播放,请尝试升级浏览器或使用chrome浏览器。</left>
-
+            video_out_1 = gr.Video(label='Generated Video/生生成的视频', elem_id='video-out_1', interactive=False, height=300)
+    gr.Markdown("""<left><font size=2>注:如果生成的视频无法播放,请尝试升级浏览器或使用chrome浏览器。</left>
+
+    <left><font size=2>Note: If the generated video cannot be played, please try to upgrade your browser or use the Chrome browser.</left>"""
+    )
 
     upload_image.upload(upload_file, upload_image, image_in, queue=False)
     image_submit.click(fn=image_to_video, inputs=[image_in, text_in], outputs=[video_out_1])
 
+    with gr.Row(variant="panel"):
+        gr.Examples(examples=[
+            [os.path.join(current_directory, 'example_images/sample2.png'), 'A girl with yellow hair and black clothes stood in front of the camera'],
+            [os.path.join(current_directory, 'example_images/sample13.png'), 'A girl in hat and sunglasses'],
+            [os.path.join(current_directory, 'example_images/sample6.png'), 'Papers were floating in the air on a table in the library'],
+            [os.path.join(current_directory, 'example_images/sample12.jpg'), 'Night sky lit with milky way galaxy'],
+            [os.path.join(current_directory, 'example_images/sample1.png'), 'A blonde girl in jeans'],
+            [os.path.join(current_directory, 'example_images/sample11.jpg'), 'A wet road between the woods'],
+            [os.path.join(current_directory, 'example_images/sample5.png'), 'A painting of a city street with a giant monster'],
+            [os.path.join(current_directory, 'example_images/sample7.png'), 'A red woodcut bird'],
+            [os.path.join(current_directory, 'example_images/sample8.jpg'), 'A green frog floats on the surface of the water on green lotus leaves, with several pink lotus flowers, in a Chinese painting style.']
+            ],
+            inputs=[image_in, text_in],
+            outputs=[video_out_1],
+            fn=image_to_video,
+            cache_examples=True,
+            examples_per_page=5,
+            label='Examples',
+        )
+
 demo.queue(status_update_rate=1, api_open=False).launch(share=False, show_error=True)
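The net effect of the app.py change is that bundled images are registered with gr.Examples and their outputs are pre-computed via cache_examples=True, with paths resolved relative to the script. A minimal standalone sketch of the same pattern; the describe function and the single example entry are illustrative placeholders, not the Space's actual i2vgen-xl pipeline call:

import os
import gradio as gr

# Resolve paths relative to this script so examples load regardless of the launch directory.
current_directory = os.path.dirname(os.path.realpath(__file__))

def describe(image_path, caption):
    # Placeholder for the real image-to-video pipeline; returns text instead of a video.
    return f"Would animate {os.path.basename(image_path)} with caption: {caption}"

with gr.Blocks() as demo:
    image_in = gr.Image(label="Image Input", type="filepath")
    text_in = gr.Textbox(label="Text description")
    out = gr.Textbox(label="Result")
    btn = gr.Button("Run")
    btn.click(fn=describe, inputs=[image_in, text_in], outputs=[out])

    gr.Examples(
        examples=[[os.path.join(current_directory, "example_images/sample2.png"),
                   "A girl with yellow hair and black clothes stood in front of the camera"]],
        inputs=[image_in, text_in],
        outputs=[out],
        fn=describe,
        cache_examples=True,   # run fn on each example at startup and serve the stored results
        examples_per_page=5,
    )

demo.queue().launch()

With cache_examples=True, Gradio runs fn once per example when the app is built and serves the cached result when a user clicks an example, so the GPU pipeline is not re-run for the demo inputs.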
example_images/readme.md
ADDED
@@ -0,0 +1 @@
+## Adding examples and caching for the demo
example_images/sample1.png ADDED
example_images/sample10.png ADDED (Git LFS)
example_images/sample11.jpg ADDED
example_images/sample12.jpg ADDED
example_images/sample13.png ADDED (Git LFS)
example_images/sample2.png ADDED (Git LFS)
example_images/sample4.png ADDED (Git LFS)
example_images/sample5.png ADDED (Git LFS)
example_images/sample6.png ADDED (Git LFS)
example_images/sample7.png ADDED (Git LFS)
example_images/sample8.jpg ADDED
example_images/sample9.jpg ADDED