LIMO-v2
by GAIR
license: apache-2.0
arXiv:2502.03387
124 downloads
Early-stage
Edge AI: Mobile / Laptop / Server
Quick Summary
LIMO-v2 is a reasoning-focused language model from GAIR, fine-tuned to work through math problems step by step and return a final answer in \boxed{} (see arXiv:2502.03387).
Device Compatibility
Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 16GB+ RAM
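
For machines near the laptop figure above, quantized weights cut memory substantially. A minimal sketch assuming the bitsandbytes 4-bit integration in transformers (the quantization setup is an assumption, not something stated on this page, and it requires a CUDA GPU):

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch

# Assumption: 4-bit NF4 quantization via bitsandbytes, which roughly
# quarters the weight footprint relative to fp16 at some cost in quality
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "GAIR/LIMO-v2",
    quantization_config=bnb_config,
    trust_remote_code=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)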
Code Examples
Using HF Transformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)
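
The system prompt asks the model to put its final answer in \boxed{}; a small, hypothetical post-processing helper (not part of the model card) can pull that answer out of the decoded response:

import re

def extract_boxed_answer(text):
    # Find every \boxed{...} with simple (non-nested) contents and take the
    # last one, which is normally the final answer in a step-by-step trace
    matches = re.findall(r"\\boxed\{([^{}]*)\}", text)
    return matches[-1] if matches else None

print(extract_boxed_answer(response))  # expected "2" for the 1+1 prompt above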
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Initialize model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"GAIR/LIMO-v2",
torch_dtype="auto",
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("GAIR/LIMO-v2", trust_remote_code=True)
# Prepare input messages
messages = [
{"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
{"role": "user", "content": "What is the result of 1+1?"}
]
# Format input using chat template
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
# Tokenize input
inputs = tokenizer(text, return_tensors="pt").to(model.device)
# Generate response
outputs = model.generate(
**inputs,
max_new_tokens=32768,
temperature=0.7,
top_p=0.95,
do_sample=True
)
# Decode and print response
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(response)Using HF Transformerspythontransformers
Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions. (A hedged request sketch follows this section.)
Try Free API
Replicate
One-click model deployment
Run models in the cloud with a simple API. No DevOps required.
Deploy Now
Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.
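Calling a Hosted Endpoint
python
As a hedged illustration of hosted inference, the sketch below sends a chat completion request to Together.ai's OpenAI-compatible endpoint. The base URL is Together's documented endpoint, but whether GAIR/LIMO-v2 is actually hosted there, and under which model id, is an assumption you should verify against the provider's model catalog; YOUR_TOGETHER_API_KEY is a placeholder.
from openai import OpenAI

# Together.ai exposes an OpenAI-compatible endpoint at this base URL.
client = OpenAI(
    base_url="https://api.together.xyz/v1",
    api_key="YOUR_TOGETHER_API_KEY",  # placeholder, not a real key
)

response = client.chat.completions.create(
    model="GAIR/LIMO-v2",  # assumed model id; confirm in the provider's catalog
    messages=[
        {"role": "system", "content": "Please reason step by step, and put your final answer within \\boxed{}."},
        {"role": "user", "content": "What is the result of 1+1?"},
    ],
    # Same sampling settings as the local example above
    temperature=0.7,
    top_p=0.95,
)
print(response.choices[0].message.content)
Replicate uses its own API rather than this OpenAI-compatible interface; consult its documentation for the equivalent call.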