ChronoEdit-14B-SDNQ-uint4-svd-r32

36
14.0B
license:apache-2.0
by
Disty0
Other
OTHER
14B params
New
36 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
32GB+ RAM
Mobile
Laptop
Server
Quick Summary

4 bit (UINT4 with SVD rank 32) quantization of nvidia/ChronoEdit-14B-Diffusers using SDNQ.

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
14GB+ RAM

Code Examples

Installation (shell):

pip install git+https://github.com/Disty0/sdnq
Example usage (Python, PyTorch):
"""Image editing with the SDNQ uint4 (SVD rank 32) quantized ChronoEdit-14B pipeline.

Loads the quantized pipeline, resizes an input image to the model's spatial
granularity, runs a text-guided edit, and saves the final frame as a PNG.
"""
import math

import torch
from PIL import Image
from diffusers.utils import load_image

from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 -- importing sdnq registers SDNQ quantization with diffusers/transformers

# Load the quantized pipeline; CPU offload streams weights to the GPU per
# sub-module so the 14B model fits in limited VRAM.
pipe = ChronoEditPipeline.from_pretrained(
    "Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# Resize so height * width <= max_area while preserving the aspect ratio,
# rounding each side down to a multiple of the model's spatial granularity
# (VAE spatial scale factor x transformer patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducible output
).frames[0]

# Take the last frame as the edited result; the `* 255` scaling implies the
# frames are float arrays in [0, 1] -- confirm against the pipeline docs.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fixed: the original filename carried a duplicated extension (".png.png").
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Example usage (Python, PyTorch):
"""Image editing with the SDNQ uint4 (SVD rank 32) quantized ChronoEdit-14B pipeline.

Loads the quantized pipeline, resizes an input image to the model's spatial
granularity, runs a text-guided edit, and saves the final frame as a PNG.
"""
import math

import torch
from PIL import Image
from diffusers.utils import load_image

from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 -- importing sdnq registers SDNQ quantization with diffusers/transformers

# Load the quantized pipeline; CPU offload streams weights to the GPU per
# sub-module so the 14B model fits in limited VRAM.
pipe = ChronoEditPipeline.from_pretrained(
    "Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# Resize so height * width <= max_area while preserving the aspect ratio,
# rounding each side down to a multiple of the model's spatial granularity
# (VAE spatial scale factor x transformer patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducible output
).frames[0]

# Take the last frame as the edited result; the `* 255` scaling implies the
# frames are float arrays in [0, 1] -- confirm against the pipeline docs.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fixed: the original filename carried a duplicated extension (".png.png").
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig # import sdnq to register it into diffusers and transformers

pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),
).frames[0]
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
image.save("chrono-edit-sdnq-uint4-svd-r32.png.png")
pythonpytorch
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
Python (PyTorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 - importing sdnq registers SDNQ with diffusers/transformers

# Load the SDNQ uint4 (SVD rank 32) quantized ChronoEdit pipeline in bfloat16;
# model CPU offload keeps peak GPU memory low at some speed cost.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")
# Resize to ~480*832 total pixels while preserving aspect ratio; both sides are
# floored to a multiple of the model's spatial granularity (VAE stride * patch size).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # fixed seed for reproducibility
).frames[0]
# Last generated frame is float in [0, 1]; scale and clip to 8-bit before saving.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fix: filename previously ended in a doubled ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
python (pytorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 -- importing sdnq registers SDNQ with diffusers/transformers

# Load the UINT4 (SVD rank 32) quantized ChronoEdit pipeline; CPU offload
# keeps idle sub-models off the GPU to reduce peak VRAM.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# Resize so that width * height <= max_area while preserving aspect ratio;
# both sides are rounded down to a multiple of mod_value (the spatial
# granularity required by the VAE / transformer patching).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # seeded default generator for reproducibility
).frames[0]
# Keep the last generated frame as the edited image.
# NOTE(review): assumes frames are float arrays in [0, 1] -- the *255 + clip
# below implies it; confirm against the pipeline's output_type.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fixed: filename previously ended in a duplicated ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
python (pytorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 -- importing sdnq registers SDNQ with diffusers/transformers

# Load the UINT4 (SVD rank 32) quantized ChronoEdit pipeline; CPU offload
# keeps idle sub-models off the GPU to reduce peak VRAM.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# Resize so that width * height <= max_area while preserving aspect ratio;
# both sides are rounded down to a multiple of mod_value (the spatial
# granularity required by the VAE / transformer patching).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # seeded default generator for reproducibility
).frames[0]
# Keep the last generated frame as the edited image.
# NOTE(review): assumes frames are float arrays in [0, 1] -- the *255 + clip
# below implies it; confirm against the pipeline's output_type.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fixed: filename previously ended in a duplicated ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
python (pytorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 -- importing sdnq registers SDNQ with diffusers/transformers

# Load the UINT4 (SVD rank 32) quantized ChronoEdit pipeline; CPU offload
# keeps idle sub-models off the GPU to reduce peak VRAM.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# Resize so that width * height <= max_area while preserving aspect ratio;
# both sides are rounded down to a multiple of mod_value (the spatial
# granularity required by the VAE / transformer patching).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # seeded default generator for reproducibility
).frames[0]
# Keep the last generated frame as the edited image.
# NOTE(review): assumes frames are float arrays in [0, 1] -- the *255 + clip
# below implies it; confirm against the pipeline's output_type.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fixed: filename previously ended in a duplicated ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
python (pytorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 -- importing sdnq registers SDNQ with diffusers/transformers

# Load the UINT4 (SVD rank 32) quantized ChronoEdit pipeline; CPU offload
# keeps idle sub-models off the GPU to reduce peak VRAM.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# Resize so that width * height <= max_area while preserving aspect ratio;
# both sides are rounded down to a multiple of mod_value (the spatial
# granularity required by the VAE / transformer patching).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # seeded default generator for reproducibility
).frames[0]
# Keep the last generated frame as the edited image.
# NOTE(review): assumes frames are float arrays in [0, 1] -- the *255 + clip
# below implies it; confirm against the pipeline's output_type.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fixed: filename previously ended in a duplicated ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
python (pytorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 -- importing sdnq registers SDNQ with diffusers/transformers

# Load the UINT4 (SVD rank 32) quantized ChronoEdit pipeline; CPU offload
# keeps idle sub-models off the GPU to reduce peak VRAM.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# Resize so that width * height <= max_area while preserving aspect ratio;
# both sides are rounded down to a multiple of mod_value (the spatial
# granularity required by the VAE / transformer patching).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # seeded default generator for reproducibility
).frames[0]
# Keep the last generated frame as the edited image.
# NOTE(review): assumes frames are float arrays in [0, 1] -- the *255 + clip
# below implies it; confirm against the pipeline's output_type.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fixed: filename previously ended in a duplicated ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
python (pytorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 -- importing sdnq registers SDNQ with diffusers/transformers

# Load the UINT4 (SVD rank 32) quantized ChronoEdit pipeline; CPU offload
# keeps idle sub-models off the GPU to reduce peak VRAM.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# Resize so that width * height <= max_area while preserving aspect ratio;
# both sides are rounded down to a multiple of mod_value (the spatial
# granularity required by the VAE / transformer patching).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # seeded default generator for reproducibility
).frames[0]
# Keep the last generated frame as the edited image.
# NOTE(review): assumes frames are float arrays in [0, 1] -- the *255 + clip
# below implies it; confirm against the pipeline's output_type.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fixed: filename previously ended in a duplicated ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")
python (pytorch)
import math
import torch
from PIL import Image
from diffusers.utils import load_image
from chronoedit_diffusers.pipeline_chronoedit import ChronoEditPipeline
from sdnq import SDNQConfig  # noqa: F401 -- importing sdnq registers SDNQ with diffusers/transformers

# Load the UINT4 (SVD rank 32) quantized ChronoEdit pipeline; CPU offload
# keeps idle sub-models off the GPU to reduce peak VRAM.
pipe = ChronoEditPipeline.from_pretrained("Disty0/ChronoEdit-14B-SDNQ-uint4-svd-r32", torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()

input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png")

# Resize so that width * height <= max_area while preserving aspect ratio;
# both sides are rounded down to a multiple of mod_value (the spatial
# granularity required by the VAE / transformer patching).
max_area = 480 * 832
aspect_ratio = input_image.height / input_image.width
mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
height = round(math.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
width = round(math.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
input_image = input_image.resize((width, height))

output = pipe(
    image=input_image,
    prompt="Add a hat to the cat",
    height=height,
    width=width,
    num_frames=5,
    guidance_scale=2.5,
    generator=torch.manual_seed(0),  # seeded default generator for reproducibility
).frames[0]
# Keep the last generated frame as the edited image.
# NOTE(review): assumes frames are float arrays in [0, 1] -- the *255 + clip
# below implies it; confirm against the pipeline's output_type.
image = Image.fromarray((output[-1] * 255).clip(0, 255).astype("uint8"))
# Fixed: filename previously ended in a duplicated ".png.png" extension.
image.save("chrono-edit-sdnq-uint4-svd-r32.png")

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.