The video created is just a green screen
after downloading video from google colab, the video is just a green screen. I used the same prompt as in example
I have the same problem, have you solved it?
I have the same problem, have you solved it?
I have no idea how to fix this, need help.
Replace export to video with this:
def export_to_video(video_frames, output_video_path):
# Get the dimensions of the first frame to set the dimensions of the video
first_frame = video_frames[0]
h, w, layers = first_frame.shape
fps = 24 # frames per second
# Create a video writer object
with imageio.get_writer(output_video_path, fps=fps) as writer:
for frame in video_frames:
# Convert the numpy array to a PIL Image
#pil_img = Image.fromarray(frame)
# Append the image to the video
writer.append_data(frame)#np.array(pil_img))
I tried with the code provided but got this error:
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
import imageio
from diffusers.utils import export_to_video
def export_to_video(video_frames, output_video_path):
# Get the dimensions of the first frame to set the dimensions of the video
first_frame = video_frames[0]
h, w, layers = first_frame.shape
fps = 24 # frames per second
# Create a video writer object
with imageio.get_writer(output_video_path, fps=fps) as writer:
for frame in video_frames:
# Convert the numpy array to a PIL Image
#pil_img = Image.fromarray(frame)
# Append the image to the video
writer.append_data(frame)#np.array(pil_img))
pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
prompt = "Darth Vader is surfing on waves"
video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
video_path = export_to_video(video_frames, "/video")
ERROR:
ValueError Traceback (most recent call last)
in <cell line: 26>()
24 prompt = "Darth Vader is surfing on waves"
25 video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
---> 26 video_path = export_to_video(video_frames, "/video")
27
28
2 frames
in export_to_video(video_frames, output_video_path)
11
12 # Create a video writer object
---> 13 with imageio.get_writer(output_video_path, fps=fps) as writer:
14 for frame in video_frames:
15 # Convert the numpy array to a PIL Image
/usr/local/lib/python3.10/dist-packages/imageio/v2.py in get_writer(uri, format, mode, **kwargs)
322 imopen_args["legacy_mode"] = True
323
--> 324 image_file = imopen(uri, "w" + mode, **imopen_args)
325 if isinstance(image_file, LegacyPlugin):
326 return image_file.legacy_get_writer(**kwargs)
/usr/local/lib/python3.10/dist-packages/imageio/core/imopen.py in imopen(uri, io_mode, plugin, extension, format_hint, legacy_mode, **kwargs)
221 "Specify the plugin explicitly using the plugin
kwarg, e.g. plugin='DICOM'
"
222 )
--> 223 raise err_type(err_msg)
224
225 # close the current request here and use fresh/new ones while trying each
ValueError: ImageIO does not generally support reading folders. Limited support may be available via specific plugins. Specify the plugin explicitly using the plugin
kwarg, e.g. plugin='DICOM'