Kimi-K2.5
1.6M
2.2K
—
by
moonshotai
Image Model
OTHER
High
1.6M downloads
Battle-tested
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary
Multimodal AI model from Moonshot AI that accepts text, image, and video inputs, with a "thinking" (reasoning) mode and an "instant" mode.
Code Examples
Chat Completion (Python, vLLM)
import openai
import base64
import requests


def simple_chat(client: openai.OpenAI, model_name: str):
    """Demonstrate a text-only chat completion in both modes.

    First issues a request in the default "thinking" mode and prints the
    model's reasoning trace alongside its answer, then repeats the same
    conversation with thinking disabled ("instant" mode).
    """
    conversation = [
        {'role': 'system', 'content': 'You are Kimi, an AI assistant created by Moonshot AI.'},
        {
            'role': 'user',
            'content': [
                {'type': 'text', 'text': 'which one is bigger, 9.11 or 9.9? think carefully.'}
            ],
        },
    ]

    # Thinking mode (the default): the reply carries a reasoning trace in
    # addition to the final answer.
    reply = client.chat.completions.create(
        model=model_name, messages=conversation, stream=False, max_tokens=4096
    )
    print('====== Below is reasoning_content in Thinking Mode ======')
    print(f'reasoning content: {reply.choices[0].message.reasoning_content}')
    print('====== Below is response in Thinking Mode ======')
    print(f'response: {reply.choices[0].message.content}')

    # Instant mode: disable thinking via extra_body. The exact key differs
    # between the official API and self-hosted vLLM/SGLang deployments.
    reply = client.chat.completions.create(
        model=model_name,
        messages=conversation,
        stream=False,
        max_tokens=4096,
        extra_body={'thinking': {'type': 'disabled'}},  # this is for official API
        # extra_body={'chat_template_kwargs': {"thinking": False}}  # this is for vLLM/SGLang
    )
    print('====== Below is response in Instant Mode ======')
    print(f'response: {reply.choices[0].message.content}')


# --- Chat Completion with visual content (Python, vLLM) ---
import openai
import base64
import requests


def chat_with_image(client: openai.OpenAI, model_name: str):
    """Send an image plus a text prompt, in thinking and instant modes.

    Downloads a PNG, embeds it as a base64 data URL in an `image_url`
    content part, and prints the reasoning trace and answer (thinking
    mode), then the answer alone (instant mode).

    Returns:
        The instant-mode response text.
    """
    url = 'https://huggingface.co/moonshotai/Kimi-K2.5/resolve/main/figures/kimi-logo.png'
    image_base64 = base64.b64encode(requests.get(url).content).decode()
    messages = [
        {
            'role': 'user',
            'content': [
                {
                    'type': 'image_url',
                    # Fix: no space after "base64," — RFC 2397 data URLs must
                    # not contain whitespace, and strict parsers reject it.
                    'image_url': {'url': f'data:image/png;base64,{image_base64}'},
                },
                {'type': 'text', 'text': 'Describe this image in detail.'},
            ],
        }
    ]

    # Thinking mode (default); larger max_tokens to leave room for the
    # reasoning trace on image inputs.
    response = client.chat.completions.create(
        model=model_name, messages=messages, stream=False, max_tokens=8192
    )
    print('====== Below is reasoning_content in Thinking Mode ======')
    print(f'reasoning content: {response.choices[0].message.reasoning_content}')
    print('====== Below is response in Thinking Mode ======')
    print(f'response: {response.choices[0].message.content}')

    # Instant mode: thinking disabled via extra_body (key differs between the
    # official API and vLLM/SGLang deployments).
    response = client.chat.completions.create(
        model=model_name,
        messages=messages,
        stream=False,
        max_tokens=4096,
        extra_body={'thinking': {'type': 'disabled'}},  # this is for official API
        # extra_body={'chat_template_kwargs': {"thinking": False}}  # this is for vLLM/SGLang
    )
    print('====== Below is response in Instant Mode ======')
    print(f'response: {response.choices[0].message.content}')
    return response.choices[0].message.content
import openai
import base64
import requests


def chat_with_video(client: openai.OpenAI, model_name: str):
    """Send a video plus a text prompt, in thinking and instant modes.

    Downloads an MP4, embeds it as a base64 data URL in a `video_url`
    content part, prints the reasoning trace and answer (thinking mode),
    then the answer alone (instant mode), and returns the latter.
    """
    url = 'https://huggingface.co/moonshotai/Kimi-K2.5/resolve/main/figures/demo_video.mp4'
    video_base64 = base64.b64encode(requests.get(url).content).decode()
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "video_url",
                    "video_url": {"url": f"data:video/mp4;base64,{video_base64}"},
                },
                {"type": "text", "text": "Describe the video in detail."},
            ],
        }
    ]

    # Thinking mode (default); relies on server defaults for streaming and
    # token limits.
    first_reply = client.chat.completions.create(model=model_name, messages=messages)
    print('====== Below is reasoning_content in Thinking Mode ======')
    print(f'reasoning content: {first_reply.choices[0].message.reasoning_content}')
    print('====== Below is response in Thinking Mode ======')
    print(f'response: {first_reply.choices[0].message.content}')

    # Instant mode: thinking disabled via extra_body (key differs between the
    # official API and vLLM/SGLang deployments).
    second_reply = client.chat.completions.create(
        model=model_name,
        messages=messages,
        stream=False,
        max_tokens=4096,
        extra_body={'thinking': {'type': 'disabled'}},  # this is for official API
        # extra_body={'chat_template_kwargs': {"thinking": False}}  # this is for vLLM/SGLang
    )
    print('====== Below is response in Instant Mode ======')
    print(f'response: {second_reply.choices[0].message.content}')
    return second_reply.choices[0].message.content


# --- Deploy This Model ---
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free APIReplicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy NowDisclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.