Nanbeige4-3B-Thinking-2510
by Nanbeige

Language model, 3.0B parameters, llama architecture, 3 languages, license: OTHER. Newly released and early-stage; 149 downloads so far.

Edge AI targets: Mobile, Laptop, Server (7GB+ RAM).
Quick Summary
Nanbeige4-3B-Thinking is a 3B-parameter reasoning model within the fourth-generation Nanbeige LLM family.
Device Compatibility

| Device | Memory |
| --- | --- |
| Mobile | 4-6GB RAM |
| Laptop | 16GB RAM |
| Server | GPU |

Minimum recommended: 3GB+ RAM.
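
For the tighter mobile and minimum-RAM budgets above, a quantized load is the usual route. A minimal sketch, assuming bitsandbytes is installed; 4-bit loading is not shown on the model card itself:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Assumption: bitsandbytes 4-bit quantization, to fit roughly within the
# 3GB+ minimum listed above. Not taken from the model card.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # store weights in 4-bit NF4
    bnb_4bit_compute_dtype=torch.bfloat16,  # run matmuls in bf16
)
model = AutoModelForCausalLM.from_pretrained(
    'Nanbeige/Nanbeige4-3B-Thinking-2510',
    quantization_config=quant_config,
    device_map='auto',
    trust_remote_code=True,
)
```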
Code Examples
Quickstart

Run the model with the Hugging Face Transformers library:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tokenizer and model. The repository ships custom code, so
# trust_remote_code=True is required.
tokenizer = AutoTokenizer.from_pretrained(
    'Nanbeige/Nanbeige4-3B-Thinking-2510',
    use_fast=False,
    trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    'Nanbeige/Nanbeige4-3B-Thinking-2510',
    torch_dtype='auto',
    device_map='auto',
    trust_remote_code=True
)

messages = [
    {'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]

# Render the chat template to text, then tokenize it without adding
# extra special tokens (the template already includes them).
prompt = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids

# 166101 is the model's end-of-generation token id. Using model.device
# (rather than hard-coding 'cuda') keeps the snippet working wherever
# device_map placed the weights.
output_ids = model.generate(input_ids.to(model.device), eos_token_id=166101)

# Decode only the newly generated tokens.
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)
```
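
This is a "thinking" variant, so the decoded text may include a reasoning trace before the final answer. As an assumption (the model card does not document the output format), many such models wrap the trace in <think>...</think> tags; if this one does too, it can be split off like so:

```python
import re

def split_reasoning(text: str):
    """Return (reasoning, answer); reasoning is '' when no <think> block exists.

    Assumes a <think>...</think> wrapper, which the model card does not confirm.
    """
    match = re.search(r'<think>(.*?)</think>', text, flags=re.DOTALL)
    if match is None:
        return '', text.strip()
    return match.group(1).strip(), text[match.end():].strip()

reasoning, answer = split_reasoning(resp)
print(answer)
```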
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
use_fast=False,
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
'Nanbeige/Nanbeige4-3B-Thinking-2510',
torch_dtype='auto',
device_map='auto',
trust_remote_code=True
)
messages = [
{'role': 'user', 'content': 'Which number is bigger, 9.11 or 9.8?'}
]
prompt = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=False
)
input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids
output_ids = model.generate(input_ids.to('cuda'), eos_token_id=166101)
resp = tokenizer.decode(output_ids[0][len(input_ids[0]):], skip_special_tokens=True)
print(resp)<span id="Inference">4. Quickstart</span>texttransformers
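If the model wraps its reasoning in explicit markers, you may want to separate the trace from the final answer. The sketch below assumes <think>...</think> tags, which many thinking models use but which this page does not confirm; check the chat template shipped in the repository. Note also that skip_special_tokens=True in the quickstart may already strip such markers if they are registered as special tokens, in which case decode with skip_special_tokens=False first.

# Hypothetical post-processing: split a <think>...</think> trace from the
# final answer. The tag names are an assumption; verify them against the
# model's chat template before relying on this.
import re

match = re.search(r'<think>(.*?)</think>(.*)', resp, flags=re.DOTALL)
if match:
    thinking = match.group(1).strip()
    answer = match.group(2).strip()
else:
    thinking, answer = '', resp.strip()  # no markers found: whole output is the answer
print(answer)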