FLUX.1-dev-IP-Adapter

2.4K
307
1 language
by
InstantX
Image Model
OTHER
New
2K downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary

This repository contains an IP-Adapter for FLUX.1-dev.

Code Examples

Inference example (Python, transformers)
"""Generate an image with FLUX.1-dev conditioned on a reference image via IP-Adapter."""
import os
from PIL import Image

import torch
import torch.nn as nn

from pipeline_flux_ipa import FluxPipeline
from transformer_flux import FluxTransformer2DModel
from attention_processor import IPAFluxAttnProcessor2_0
from transformers import AutoProcessor, SiglipVisionModel

from infer_flux_ipa_siglip import resize_img, MLPProjModel, IPAdapter

# SigLIP image encoder (fetched from the Hub) and local IP-Adapter checkpoint.
image_encoder_path = "google/siglip-so400m-patch14-384"
ipadapter_path = "./ip-adapter.bin"

# Load the FLUX transformer in bfloat16 to reduce memory versus fp32.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)

# Wrap the pipeline with the IP-Adapter; num_tokens is the number of image
# tokens injected into attention (128 per this repo's example).
ip_model = IPAdapter(pipe, image_encoder_path, ipadapter_path, device="cuda", num_tokens=128)

# Load and preprocess the reference image. Renamed from `image_dir` — it is a
# file path, not a directory; os.path.basename is portable across separators.
image_path = "./assets/images/2.jpg"
image_name = os.path.basename(image_path)
image = Image.open(image_path).convert("RGB")
image = resize_img(image)

prompt = "a young girl"

# scale balances image conditioning vs. the text prompt (0.0 = text only).
images = ip_model.generate(
    pil_image=image,
    prompt=prompt,
    scale=0.7,
    width=960, height=1280,
    seed=42
)

# Fix: PIL raises FileNotFoundError if the output directory does not exist.
os.makedirs("results", exist_ok=True)
images[0].save(f"results/{image_name}")
Inference example (Python, transformers)
"""Generate an image with FLUX.1-dev conditioned on a reference image via IP-Adapter."""
import os
from PIL import Image

import torch
import torch.nn as nn

from pipeline_flux_ipa import FluxPipeline
from transformer_flux import FluxTransformer2DModel
from attention_processor import IPAFluxAttnProcessor2_0
from transformers import AutoProcessor, SiglipVisionModel

from infer_flux_ipa_siglip import resize_img, MLPProjModel, IPAdapter

# SigLIP image encoder (fetched from the Hub) and local IP-Adapter checkpoint.
image_encoder_path = "google/siglip-so400m-patch14-384"
ipadapter_path = "./ip-adapter.bin"

# Load the FLUX transformer in bfloat16 to reduce memory versus fp32.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)

# Wrap the pipeline with the IP-Adapter; num_tokens is the number of image
# tokens injected into attention (128 per this repo's example).
ip_model = IPAdapter(pipe, image_encoder_path, ipadapter_path, device="cuda", num_tokens=128)

# Load and preprocess the reference image. Renamed from `image_dir` — it is a
# file path, not a directory; os.path.basename is portable across separators.
image_path = "./assets/images/2.jpg"
image_name = os.path.basename(image_path)
image = Image.open(image_path).convert("RGB")
image = resize_img(image)

prompt = "a young girl"

# scale balances image conditioning vs. the text prompt (0.0 = text only).
images = ip_model.generate(
    pil_image=image,
    prompt=prompt,
    scale=0.7,
    width=960, height=1280,
    seed=42
)

# Fix: PIL raises FileNotFoundError if the output directory does not exist.
os.makedirs("results", exist_ok=True)
images[0].save(f"results/{image_name}")
Inference example (Python, transformers)
"""Generate an image with FLUX.1-dev conditioned on a reference image via IP-Adapter."""
import os
from PIL import Image

import torch
import torch.nn as nn

from pipeline_flux_ipa import FluxPipeline
from transformer_flux import FluxTransformer2DModel
from attention_processor import IPAFluxAttnProcessor2_0
from transformers import AutoProcessor, SiglipVisionModel

from infer_flux_ipa_siglip import resize_img, MLPProjModel, IPAdapter

# SigLIP image encoder (fetched from the Hub) and local IP-Adapter checkpoint.
image_encoder_path = "google/siglip-so400m-patch14-384"
ipadapter_path = "./ip-adapter.bin"

# Load the FLUX transformer in bfloat16 to reduce memory versus fp32.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)

# Wrap the pipeline with the IP-Adapter; num_tokens is the number of image
# tokens injected into attention (128 per this repo's example).
ip_model = IPAdapter(pipe, image_encoder_path, ipadapter_path, device="cuda", num_tokens=128)

# Load and preprocess the reference image. Renamed from `image_dir` — it is a
# file path, not a directory; os.path.basename is portable across separators.
image_path = "./assets/images/2.jpg"
image_name = os.path.basename(image_path)
image = Image.open(image_path).convert("RGB")
image = resize_img(image)

prompt = "a young girl"

# scale balances image conditioning vs. the text prompt (0.0 = text only).
images = ip_model.generate(
    pil_image=image,
    prompt=prompt,
    scale=0.7,
    width=960, height=1280,
    seed=42
)

# Fix: PIL raises FileNotFoundError if the output directory does not exist.
os.makedirs("results", exist_ok=True)
images[0].save(f"results/{image_name}")
Inference example (Python, transformers)
"""Generate an image with FLUX.1-dev conditioned on a reference image via IP-Adapter."""
import os
from PIL import Image

import torch
import torch.nn as nn

from pipeline_flux_ipa import FluxPipeline
from transformer_flux import FluxTransformer2DModel
from attention_processor import IPAFluxAttnProcessor2_0
from transformers import AutoProcessor, SiglipVisionModel

from infer_flux_ipa_siglip import resize_img, MLPProjModel, IPAdapter

# SigLIP image encoder (fetched from the Hub) and local IP-Adapter checkpoint.
image_encoder_path = "google/siglip-so400m-patch14-384"
ipadapter_path = "./ip-adapter.bin"

# Load the FLUX transformer in bfloat16 to reduce memory versus fp32.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)

# Wrap the pipeline with the IP-Adapter; num_tokens is the number of image
# tokens injected into attention (128 per this repo's example).
ip_model = IPAdapter(pipe, image_encoder_path, ipadapter_path, device="cuda", num_tokens=128)

# Load and preprocess the reference image. Renamed from `image_dir` — it is a
# file path, not a directory; os.path.basename is portable across separators.
image_path = "./assets/images/2.jpg"
image_name = os.path.basename(image_path)
image = Image.open(image_path).convert("RGB")
image = resize_img(image)

prompt = "a young girl"

# scale balances image conditioning vs. the text prompt (0.0 = text only).
images = ip_model.generate(
    pil_image=image,
    prompt=prompt,
    scale=0.7,
    width=960, height=1280,
    seed=42
)

# Fix: PIL raises FileNotFoundError if the output directory does not exist.
os.makedirs("results", exist_ok=True)
images[0].save(f"results/{image_name}")
Inference example (Python, transformers)
"""Generate an image with FLUX.1-dev conditioned on a reference image via IP-Adapter."""
import os
from PIL import Image

import torch
import torch.nn as nn

from pipeline_flux_ipa import FluxPipeline
from transformer_flux import FluxTransformer2DModel
from attention_processor import IPAFluxAttnProcessor2_0
from transformers import AutoProcessor, SiglipVisionModel

from infer_flux_ipa_siglip import resize_img, MLPProjModel, IPAdapter

# SigLIP image encoder (fetched from the Hub) and local IP-Adapter checkpoint.
image_encoder_path = "google/siglip-so400m-patch14-384"
ipadapter_path = "./ip-adapter.bin"

# Load the FLUX transformer in bfloat16 to reduce memory versus fp32.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)

# Wrap the pipeline with the IP-Adapter; num_tokens is the number of image
# tokens injected into attention (128 per this repo's example).
ip_model = IPAdapter(pipe, image_encoder_path, ipadapter_path, device="cuda", num_tokens=128)

# Load and preprocess the reference image. Renamed from `image_dir` — it is a
# file path, not a directory; os.path.basename is portable across separators.
image_path = "./assets/images/2.jpg"
image_name = os.path.basename(image_path)
image = Image.open(image_path).convert("RGB")
image = resize_img(image)

prompt = "a young girl"

# scale balances image conditioning vs. the text prompt (0.0 = text only).
images = ip_model.generate(
    pil_image=image,
    prompt=prompt,
    scale=0.7,
    width=960, height=1280,
    seed=42
)

# Fix: PIL raises FileNotFoundError if the output directory does not exist.
os.makedirs("results", exist_ok=True)
images[0].save(f"results/{image_name}")
Inference example (Python, transformers)
"""Generate an image with FLUX.1-dev conditioned on a reference image via IP-Adapter."""
import os
from PIL import Image

import torch
import torch.nn as nn

from pipeline_flux_ipa import FluxPipeline
from transformer_flux import FluxTransformer2DModel
from attention_processor import IPAFluxAttnProcessor2_0
from transformers import AutoProcessor, SiglipVisionModel

from infer_flux_ipa_siglip import resize_img, MLPProjModel, IPAdapter

# SigLIP image encoder (fetched from the Hub) and local IP-Adapter checkpoint.
image_encoder_path = "google/siglip-so400m-patch14-384"
ipadapter_path = "./ip-adapter.bin"

# Load the FLUX transformer in bfloat16 to reduce memory versus fp32.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)

# Wrap the pipeline with the IP-Adapter; num_tokens is the number of image
# tokens injected into attention (128 per this repo's example).
ip_model = IPAdapter(pipe, image_encoder_path, ipadapter_path, device="cuda", num_tokens=128)

# Load and preprocess the reference image. Renamed from `image_dir` — it is a
# file path, not a directory; os.path.basename is portable across separators.
image_path = "./assets/images/2.jpg"
image_name = os.path.basename(image_path)
image = Image.open(image_path).convert("RGB")
image = resize_img(image)

prompt = "a young girl"

# scale balances image conditioning vs. the text prompt (0.0 = text only).
images = ip_model.generate(
    pil_image=image,
    prompt=prompt,
    scale=0.7,
    width=960, height=1280,
    seed=42
)

# Fix: PIL raises FileNotFoundError if the output directory does not exist.
os.makedirs("results", exist_ok=True)
images[0].save(f"results/{image_name}")
Inference example (Python, transformers)
"""Generate an image with FLUX.1-dev conditioned on a reference image via IP-Adapter."""
import os
from PIL import Image

import torch
import torch.nn as nn

from pipeline_flux_ipa import FluxPipeline
from transformer_flux import FluxTransformer2DModel
from attention_processor import IPAFluxAttnProcessor2_0
from transformers import AutoProcessor, SiglipVisionModel

from infer_flux_ipa_siglip import resize_img, MLPProjModel, IPAdapter

# SigLIP image encoder (fetched from the Hub) and local IP-Adapter checkpoint.
image_encoder_path = "google/siglip-so400m-patch14-384"
ipadapter_path = "./ip-adapter.bin"

# Load the FLUX transformer in bfloat16 to reduce memory versus fp32.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16
)

# Wrap the pipeline with the IP-Adapter; num_tokens is the number of image
# tokens injected into attention (128 per this repo's example).
ip_model = IPAdapter(pipe, image_encoder_path, ipadapter_path, device="cuda", num_tokens=128)

# Load and preprocess the reference image. Renamed from `image_dir` — it is a
# file path, not a directory; os.path.basename is portable across separators.
image_path = "./assets/images/2.jpg"
image_name = os.path.basename(image_path)
image = Image.open(image_path).convert("RGB")
image = resize_img(image)

prompt = "a young girl"

# scale balances image conditioning vs. the text prompt (0.0 = text only).
images = ip_model.generate(
    pil_image=image,
    prompt=prompt,
    scale=0.7,
    width=960, height=1280,
    seed=42
)

# Fix: PIL raises FileNotFoundError if the output directory does not exist.
os.makedirs("results", exist_ok=True)
images[0].save(f"results/{image_name}")

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.