taef1
2
license:mit
by
silveroxides
Other
OTHER
New
2 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary
TAEF1 is a very tiny autoencoder that uses the same "latent API" as FLUX.
Code Examples
Using in 🧨 diffusers (Python, PyTorch)
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")
Using in 🧨 diffusers (Python, PyTorch)
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")
Using in 🧨 diffusers (Python, PyTorch)
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")
Using in 🧨 diffusers (Python, PyTorch)
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")Using in 🧨 diffuserspythonpytorch
import torch
from diffusers import FluxPipeline, AutoencoderTiny
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16)
pipe.enable_sequential_cpu_offload()
prompt = "slice of delicious New York-style berry cheesecake"
image = pipe(
prompt,
guidance_scale=0.0,
num_inference_steps=4,
max_sequence_length=256,
).images[0]
image.save("cheesecake.png")
Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API
Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now
Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.