AI Generated Videos in Python
Generate videos from text in Python using the Hugging Face diffusers library.
Imports
Import PyTorch, the diffusion pipeline, and the video export helper from the Hugging Face diffusers library.
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
Inference
Loading a pre-trained checkpoint to generate a video.
Load Pipeline Checkpoint
pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
pipe = pipe.to("cuda")
pipe
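If GPU memory is tight, diffusers pipelines can instead offload model components to the CPU between forward passes. A minimal sketch, assuming the accelerate package is installed:
# Optional: lower VRAM usage at some speed cost; call this instead of pipe.to("cuda").
pipe.enable_model_cpu_offload()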
Enter a prompt and video output path
prompt = "Darth Vader is running"
output_path = "/content/video.mp4"
Generate Video
This might take a while.
video_frames = pipe(prompt).frames
video_path = export_to_video(video_frames, output_path)
video_path
'/content/video.mp4'
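The pipeline call also accepts extra arguments. As a rough sketch (argument names follow the diffusers text-to-video pipeline; verify against your installed version), you can reduce the number of denoising steps and fix the seed for reproducible output:
# Assumed arguments: num_inference_steps, num_frames, and generator.
generator = torch.Generator(device="cuda").manual_seed(0)
video_frames = pipe(prompt, num_inference_steps=25, num_frames=16, generator=generator).frames
video_path = export_to_video(video_frames, output_path)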
Play the Video
import imageio
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.transform import resize
from IPython.display import HTML

def display_video(video):
    # Build a matplotlib animation from the list of frames.
    fig = plt.figure(figsize=(3, 3))
    mov = []
    for i in range(len(video)):
        img = plt.imshow(video[i], animated=True)
        plt.axis('off')
        mov.append([img])
    anime = animation.ArtistAnimation(fig, mov, interval=100, repeat_delay=1000)
    plt.close()
    return anime

video = imageio.mimread(video_path)
HTML(display_video(video).to_html5_video())
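If you just want to preview the clip without building a matplotlib animation, IPython can embed the MP4 directly; a minimal alternative:
# Simpler playback: embed the MP4 file in the notebook output.
from IPython.display import Video
Video(video_path, embed=True)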