wan21-vae

1
1 language
by
wangkanai
Video Model
OTHER
New
0 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary

WAN2.1 VAE - 3D Causal Video Variational Autoencoder. WAN2.1 VAE is a novel 3D causal Variational Autoencoder specifically designed for high-quality video generation.

Code Examples

Resolution-Specific Requirements (Python / PyTorch)
"""Load the WAN2.1 VAE with diffusers.

The original page repeated this identical snippet ~50 times (a scraping /
copy-paste artifact); it is consolidated here into a single example.
"""
import os

import torch
from diffusers import AutoencoderKL

# Local path to the WAN2.1 VAE weights. The original example hard-coded a
# Windows drive path; keep it as the default but allow an override via the
# WAN21_VAE_PATH environment variable so the example runs on other machines.
MODEL_PATH = os.environ.get("WAN21_VAE_PATH", "E:/huggingface/wan21-vae/vae/wan")

# The original unconditionally called .to("cuda"), which raises on CPU-only
# machines. Fall back to CPU when no CUDA device is available.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the WAN2.1 VAE in half precision to halve memory use on GPU.
# NOTE(review): float16 on CPU is slow and not universally supported by all
# ops — consider float32 when DEVICE == "cpu"; kept fp16 to match the
# original example's dtype.
vae = AutoencoderKL.from_pretrained(
    MODEL_PATH,
    torch_dtype=torch.float16,
).to(DEVICE)

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Resolution-Specific Requirementspythonpytorch
import torch
from diffusers import AutoencoderKL

# Load the WAN2.1 VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

print(f"VAE loaded: {vae.config}")
Video Encoding Example (python, pytorch)
import torch
import numpy as np
from diffusers import AutoencoderKL

# Local checkpoint directory for the WAN2.1 VAE weights.
# (The original also imported PIL.Image, which was never used — dropped so
# the example does not require Pillow.)
MODEL_PATH = "E:/huggingface/wan21-vae/vae/wan"

# Run on GPU when available instead of crashing on CPU-only machines.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the VAE in half precision.
vae = AutoencoderKL.from_pretrained(
    MODEL_PATH,
    torch_dtype=torch.float16,
).to(device)

# Dummy video batch, shape [batch, channels, frames, height, width].
# NOTE(review): AutoencoderKL is an image (4D-input) VAE; feeding it a 5D
# video tensor suggests the 3D-causal `AutoencoderKLWan` class is actually
# intended — verify before using this snippet as-is.
video_frames = torch.randn(1, 3, 16, 480, 720, dtype=torch.float16, device=device)

# Encode to latent space; no_grad avoids building an autograd graph for
# pure inference and saves memory.
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
# Element-count ratio between the pixel-space input and its latent code.
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Encoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL
from PIL import Image
import numpy as np

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Prepare video frames (example with dummy data)
# Shape: [batch, channels, frames, height, width]
video_frames = torch.randn(1, 3, 16, 480, 720).half().to("cuda")

# Encode video to latent space
with torch.no_grad():
    latents = vae.encode(video_frames).latent_dist.sample()

print(f"Latent shape: {latents.shape}")
print(f"Compression ratio: {np.prod(video_frames.shape) / np.prod(latents.shape):.2f}x")
Video Decoding Example (python, pytorch)
import torch
from diffusers import AutoencoderKLWan

# Load the WAN2.1 video VAE.
# FIX: the original used AutoencoderKL (diffusers' 2D image VAE); the WAN2.1
# checkpoint is a 3D causal video VAE served by AutoencoderKLWan.
vae = AutoencoderKLWan.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16,
).to("cuda")

# FIX: the original referenced `latents` without ever defining it, so the
# script raised NameError when run standalone. Either reuse the latents from
# a prior vae.encode(...) call, or build a dummy tensor shaped
# [batch, z_channels, latent_frames, latent_height, latent_width].
# NOTE(review): the channel count and the temporal/spatial downsampling
# factors come from the checkpoint's config — confirm against vae.config
# before relying on the spatial/temporal sizes used here.
latents = torch.randn(
    1, vae.config.z_dim, 5, 60, 90, dtype=torch.float16, device="cuda"
)

# Decode latents back into pixel-space video frames; no gradients needed.
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Example (Python, PyTorch)
import torch
from diffusers import AutoencoderKLWan

# Load the WAN2.1 video VAE.
# FIX: the original used AutoencoderKL (diffusers' 2D image VAE); the WAN2.1
# checkpoint is a 3D causal video VAE served by AutoencoderKLWan.
vae = AutoencoderKLWan.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16,
).to("cuda")

# FIX: the original referenced `latents` without ever defining it, so the
# script raised NameError when run standalone. Either reuse the latents from
# a prior vae.encode(...) call, or build a dummy tensor shaped
# [batch, z_channels, latent_frames, latent_height, latent_width].
# NOTE(review): the channel count and the temporal/spatial downsampling
# factors come from the checkpoint's config — confirm against vae.config
# before relying on the spatial/temporal sizes used here.
latents = torch.randn(
    1, vae.config.z_dim, 5, 60, 90, dtype=torch.float16, device="cuda"
)

# Decode latents back into pixel-space video frames; no gradients needed.
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Example (Python, PyTorch)
import torch
from diffusers import AutoencoderKLWan

# Load the WAN2.1 video VAE.
# FIX: the original used AutoencoderKL (diffusers' 2D image VAE); the WAN2.1
# checkpoint is a 3D causal video VAE served by AutoencoderKLWan.
vae = AutoencoderKLWan.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16,
).to("cuda")

# FIX: the original referenced `latents` without ever defining it, so the
# script raised NameError when run standalone. Either reuse the latents from
# a prior vae.encode(...) call, or build a dummy tensor shaped
# [batch, z_channels, latent_frames, latent_height, latent_width].
# NOTE(review): the channel count and the temporal/spatial downsampling
# factors come from the checkpoint's config — confirm against vae.config
# before relying on the spatial/temporal sizes used here.
latents = torch.randn(
    1, vae.config.z_dim, 5, 60, 90, dtype=torch.float16, device="cuda"
)

# Decode latents back into pixel-space video frames; no gradients needed.
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Example (Python, PyTorch)
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Example — Python (PyTorch)
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Example — Python (PyTorch)
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Example — Python (PyTorch)
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Example — Python (PyTorch)
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Example — Python (PyTorch)
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Video Decoding Examplepythonpytorch
import torch
from diffusers import AutoencoderKL

# Load VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
).to("cuda")

# Decode latents back to video frames
# Assuming you have latents from encoding step
with torch.no_grad():
    reconstructed_video = vae.decode(latents).sample

print(f"Reconstructed video shape: {reconstructed_video.shape}")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python / PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python / PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python / PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python / PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python / PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python / PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Modelspythonpytorch
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (python, pytorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")
Integration with WAN Models (Python, PyTorch)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL

# Load custom VAE
vae = AutoencoderKL.from_pretrained(
    "E:/huggingface/wan21-vae/vae/wan",
    torch_dtype=torch.float16
)

# Load WAN model with custom VAE
pipe = DiffusionPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B",
    vae=vae,
    torch_dtype=torch.float16
).to("cuda")

# Generate video
prompt = "A serene beach at sunset with waves crashing"
video = pipe(prompt, num_frames=16, height=480, width=720).frames

print(f"Generated video: {len(video)} frames")

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.