F-Lite-Japanese-LoRA

3
2 languages
by
alfredplpl
Image Model
OTHER
New
0 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary

Copyright-safeと言っているF-Liteを日本っぽくするLoRAつくりました。 学習データはCC-0なので、これもCopyright-safeとなります。 Copyright-safeであるならば、生成結果に対して著作権侵害を気にしない画像生成の改造もできるのではないでしょうか。 これにより、マスメディア...

Code Examples

展望 (Python / PyTorch)
# Example: apply the Japanese-style LoRA to the F-Lite pipeline and generate an image.
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because DiT is not a native diffusers model: register it in the
# loading tables so FLitePipeline.from_pretrained knows how to (de)serialize it.
# (This import previously appeared twice; once is sufficient.)
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — these values must match how the LoRA weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the DiT backbone of the pipeline.
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights. weights_only=True restricts unpickling to plain tensors,
# avoiding arbitrary-code execution from an untrusted checkpoint file.
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望 (Python / PyTorch)
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

# Trick required because it is not a native diffusers model
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES
LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload() # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# Set LoRA configuration
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of your lora weights

# Create LoRA configuration
lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights, use loaded ones
)

# Add LoRA adapter to the model
pipeline.dit_model.add_adapter(lora_config)

# Load LoRA weights
lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")
展望pythonpytorch
import torch
from f_lite import FLitePipeline
from peft import LoraConfig, set_peft_model_state_dict

# Trick required because F-Lite's DiT is not a native diffusers model:
# register it so the diffusers pipeline loader knows how to (de)serialize it.
from diffusers.pipelines.pipeline_loading_utils import LOADABLE_CLASSES, ALL_IMPORTABLE_CLASSES

LOADABLE_CLASSES["f_lite"] = LOADABLE_CLASSES["f_lite.model"] = {"DiT": ["save_pretrained", "from_pretrained"]}
ALL_IMPORTABLE_CLASSES["DiT"] = ["save_pretrained", "from_pretrained"]

pipeline = FLitePipeline.from_pretrained("Freepik/F-Lite", torch_dtype=torch.bfloat16)
pipeline.enable_model_cpu_offload()  # For less memory consumption. Alternatively, pipeline.to("cuda")
pipeline.vae.enable_slicing()
pipeline.vae.enable_tiling()

# LoRA configuration — must match how the adapter weights were trained.
target_modules = ["qkv", "context_kv", "proj", "q"]  # Target modules detected from LoRA weights
lora_rank = 128  # LoRA rank of the trained adapter weights

lora_config = LoraConfig(
    r=lora_rank,
    lora_alpha=lora_rank,  # Typically set to the same value as rank
    target_modules=target_modules,
    bias="none",
    init_lora_weights=False,  # Don't initialize weights; the loaded state dict supplies them
)

# Attach the (uninitialized) LoRA adapter to the DiT, then fill it with
# the trained weights. weights_only=True avoids arbitrary-code unpickling.
pipeline.dit_model.add_adapter(lora_config)

lora_state_dict = torch.load("lora_weights.pt", weights_only=True)
set_peft_model_state_dict(pipeline.dit_model, lora_state_dict)

# Generate an image.
output = pipeline(
    prompt="A photo of Japanese sushi including tuna.",
    height=1024,
    width=1024,
    num_inference_steps=30,
    guidance_scale=3.0,
    negative_prompt=None,
)

# Save the generated image.
output.images[0].save("generated_image.png")

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.