AI Generated Videos in Python
Generate videos from text in Python using the Hugging Face diffusers library.

Imports
Import PyTorch and the DiffusionPipeline from the Hugging Face diffusers library, along with the export_to_video utility for saving the generated frames.

import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

Inference
Load a pre-trained checkpoint and use it to generate a video.
Load Pipeline Checkpoint
# Load the ModelScope text-to-video checkpoint in half precision (fp16)
pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
# Move the pipeline to the GPU
pipe = pipe.to("cuda")

Enter a prompt and Video output path
prompt = "Darth Vader is running"
output_path = "/content/video.mp4"

Generate Video
This step might take a while, depending on your GPU.
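The code for this step is not shown in the original; below is a minimal sketch, assuming the standard diffusers text-to-video call and that video_path (used by the playback code further down) is the path returned by export_to_video. Depending on your diffusers version, the generated frames may be available directly as .frames or nested one level deeper as .frames[0].

# Run the pipeline on the prompt to produce a list of video frames
# (on newer diffusers versions use pipe(prompt).frames[0] instead)
video_frames = pipe(prompt).frames

# Write the frames to an mp4 at output_path and keep the returned path for playback
video_path = export_to_video(video_frames, output_path)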
Play the Video
import imageio
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.transform import resize
from IPython.display import HTML
def display_video(video):
    # Build a small matplotlib figure and collect one artist per frame
    fig = plt.figure(figsize=(3, 3))
    mov = []
    for i in range(len(video)):
        img = plt.imshow(video[i], animated=True)
        plt.axis('off')
        mov.append([img])
    # Stitch the frames into an animation (100 ms per frame)
    anime = animation.ArtistAnimation(fig, mov, interval=100, repeat_delay=1000)
    plt.close()
    return anime

# Read the generated mp4 back in as a list of frames and display it inline
video = imageio.mimread(video_path)
HTML(display_video(video).to_html5_video())