waon-siglip2-base-patch16-256
14
1
license:apache-2.0
by
llm-jp
Image Model
OTHER
Paper: arXiv:2510.22276 (parameter count not listed; the original page mistakenly rendered the arXiv ID as "2510.22276B params")
New
14 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
16GB+ RAM
Mobile
Laptop
Server
Quick Summary
Japanese SigLIP2 vision-language model (base, patch16, 256px) for zero-shot image classification and image–text matching with Japanese text labels.
Device Compatibility
Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
16GB+ RAM
Code Examples
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
How to Use (Python, transformers)
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModel
ckpt = "llm-jp/waon-siglip2-base-patch16-256"
model = AutoModel.from_pretrained(ckpt)
processor = AutoProcessor.from_pretrained(ckpt)
url = "https://upload.wikimedia.org/wikipedia/commons/5/58/Shiba_inu_taiki.jpg"
image = Image.open(requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}).raw).convert("RGB")
candidate_labels = ["柴犬", "日本猫", "いわし"]
# IMPORTANT: we pass `padding=max_length` and `max_length=64` since the model was trained with this
inputs = processor(text=candidate_labels, images=image, padding="max_length", max_length=64, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)
for i, label in enumerate(candidate_labels):
print(f"prob that image is '{label}': {probs[0][i]:.2%}")
# prob that image is '柴犬': 96.57%
# prob that image is '日本猫': 0.03%
# prob that image is 'いわし': 0.00%
Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API
Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now
Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.