AndesVL 2B Thinking

28
5
2.0B
license:apache-2.0
by
OPPOer
Image Model
OTHER
2B params
New
28 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
5GB+ RAM
Mobile
Laptop
Server
Quick Summary

AndesVL is a suite of mobile-optimized Multimodal Large Language Models (MLLMs) with 0.6B–4B parameters; this 2B "Thinking" variant targets on-device multimodal reasoning.

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
2GB+ RAM

Code Examples

Quick Start (command line, transformers)
# Quick-start example for AndesVL-2B-Thinking.
# Requires transformers>=4.52.4.
# NOTE(review): trust_remote_code=True executes Python shipped inside the model
# repository — only enable it for checkpoints you trust.

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

# Load the custom model class (provided by the repo) in bfloat16 and move it
# to the GPU; a CUDA device is required for this snippet.
model = AutoModel.from_pretrained(
    model_dir, trust_remote_code=True, torch_dtype=torch.bfloat16
).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat messages: one user turn with a text part and an image part.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "描述这张图片。"},
            {
                "type": "image_url",
                "image_url": {
                    # Replace with your own image URL or a local path/to/image.
                    "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg"
                },
            },
        ],
    },
]

# thinking=True enables the model's chain-of-thought ("Thinking") generation mode.
res = model.chat(
    messages,
    tokenizer,
    image_processor,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.6,
    thinking=True,
)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
# Quick Start (command line, transformers)
#
# NOTE(review): the original page scrape repeated this identical snippet 31
# times verbatim; deduplicated here to a single copy — no content was lost.
#
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

# Load the model weights in bfloat16 and move them to the GPU.
# NOTE(review): .cuda() assumes a CUDA device is available — the demo fails
# on CPU-only machines; confirm against the model card's requirements.
model = AutoModel.from_pretrained(
    model_dir, trust_remote_code=True, torch_dtype=torch.bfloat16
).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# One user turn in OpenAI-style chat format: a text prompt plus an image.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "描述这张图片。"},
            {
                "type": "image_url",
                "image_url": {
                    # Remote URL; a local image path also works here.
                    "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg"
                },
            },
        ],
    },
]

# Run generation; thinking=True enables the model's reasoning ("thinking") mode.
res = model.chat(
    messages,
    tokenizer,
    image_processor,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.6,
    thinking=True,
)
print(res)
Quick Start (command line · transformers)
# Quick-start inference for OPPOer/AndesVL-2B-Thinking.
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

# Load weights in bfloat16 and move them onto the GPU; tokenizer and image
# processor come from the same hub repository (remote code must be trusted).
model = (
    AutoModel
    .from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.bfloat16)
    .cuda()
)
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# Single user turn: a text prompt plus one image reference (OpenAI-style parts).
text_part = {"type": "text", "text": "描述这张图片。"}
image_part = {
    "type": "image_url",
    "image_url": {
        # remote URL or a local image path
        "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg"
    },
}
messages = [{"role": "user", "content": [text_part, image_part]}]

# Generate with the card's recommended sampling settings; thinking=True
# switches on the model's chain-of-thought ("Thinking") mode.
res = model.chat(
    messages,
    tokenizer,
    image_processor,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.6,
    thinking=True,
)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Start (command line, transformers)
# require transformers>=4.52.4
# Quick-start: run AndesVL-2B-Thinking on one image + text prompt (needs a CUDA GPU).

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

# Hugging Face hub id (or a local checkpoint directory).
model_dir = "OPPOer/AndesVL-2B-Thinking"

# trust_remote_code=True executes the custom modeling code shipped with the repo;
# weights are loaded in bfloat16 and moved to the default CUDA device.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# OpenAI-style chat payload: one user turn with a text part and an image part.
messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},  # "Describe this image."
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # or a local path/to/image — TODO confirm local paths are accepted
                            },
                        }
                    ],
                },
        ]
# thinking=True presumably enables the model's reasoning ("thinking") mode — custom
# chat API from the repo's remote code. Output is sampled (do_sample=True,
# temperature=0.6) and capped at 1024 new tokens.
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# require transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,torch_dtype=torch.bfloat16).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "描述这张图片。"},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg" # image/to/path
                            },
                        }
                    ],
                },
        ]
res = model.chat(messages, tokenizer, image_processor, max_new_tokens=1024, do_sample=True, temperature=0.6, thinking=True)
print(res)
Quick Startcommandlinetransformers
# Requires transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

# Load the checkpoint in bfloat16 and move it to the GPU. trust_remote_code
# is needed because the repository ships custom modeling code.
model = AutoModel.from_pretrained(
    model_dir,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# Single user turn: one text prompt plus one image supplied by URL
# (a local file path also works in the "url" field).
image_ref = {
    "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg"
}
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "描述这张图片。"},
            {"type": "image_url", "image_url": image_ref},
        ],
    },
]

# Sampled generation; thinking=True enables the model's reasoning mode.
res = model.chat(
    messages,
    tokenizer,
    image_processor,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.6,
    thinking=True,
)
print(res)
Quick Start (command line, transformers)
# Requires transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

# Load the checkpoint in bfloat16 and move it to the GPU. trust_remote_code
# is needed because the repository ships custom modeling code.
model = AutoModel.from_pretrained(
    model_dir,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# Single user turn: one text prompt plus one image supplied by URL
# (a local file path also works in the "url" field).
image_ref = {
    "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg"
}
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "描述这张图片。"},
            {"type": "image_url", "image_url": image_ref},
        ],
    },
]

# Sampled generation; thinking=True enables the model's reasoning mode.
res = model.chat(
    messages,
    tokenizer,
    image_processor,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.6,
    thinking=True,
)
print(res)
Quick Start (command line, transformers)
# Requires transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

# Load the checkpoint in bfloat16 and move it to the GPU. trust_remote_code
# is needed because the repository ships custom modeling code.
model = AutoModel.from_pretrained(
    model_dir,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# Single user turn: one text prompt plus one image supplied by URL
# (a local file path also works in the "url" field).
image_ref = {
    "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg"
}
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "描述这张图片。"},
            {"type": "image_url", "image_url": image_ref},
        ],
    },
]

# Sampled generation; thinking=True enables the model's reasoning mode.
res = model.chat(
    messages,
    tokenizer,
    image_processor,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.6,
    thinking=True,
)
print(res)
Quick Start (command line, transformers)
# Requires transformers>=4.52.4

import torch
from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor

model_dir = "OPPOer/AndesVL-2B-Thinking"

# Load the checkpoint in bfloat16 and move it to the GPU. trust_remote_code
# is needed because the repository ships custom modeling code.
model = AutoModel.from_pretrained(
    model_dir,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).cuda()
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
image_processor = CLIPImageProcessor.from_pretrained(model_dir, trust_remote_code=True)

# Single user turn: one text prompt plus one image supplied by URL
# (a local file path also works in the "url" field).
image_ref = {
    "url": "https://i-blog.csdnimg.cn/blog_migrate/2f4c88e71f7eabe46d062d2f1ec77d10.jpeg"
}
messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "描述这张图片。"},
            {"type": "image_url", "image_url": image_ref},
        ],
    },
]

# Sampled generation; thinking=True enables the model's reasoning mode.
res = model.chat(
    messages,
    tokenizer,
    image_processor,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.6,
    thinking=True,
)
print(res)

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API — start for free and scale to millions of requests.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.