AM-Thinking-v1-GGUF
by Mungert · Language Model · license: apache-2.0 · BF16 · 408 downloads
Edge AI targets: Mobile, Laptop, Server
Quick Summary

GGUF quantizations of a-m-team/AM-Thinking-v1, a reasoning-focused language model. The model emits its chain of thought between <think>...</think> tags, followed by its final reply between <answer>...</answer> tags, as the example below demonstrates.

Code Examples

3) Writing (Python, transformers)

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

# Load the tokenizer and the original (unquantized) checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
# Render the chat template as text, ending with the generation prompt.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

# Reasoning traces can be long, so allow a generous generation budget.
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
# Keep only the newly generated tokens, dropping the echoed prompt.
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()

response = tokenizer.decode(output_ids, skip_special_tokens=True)
# The model wraps its reasoning in <think>...</think> and the final
# reply in <answer>...</answer>; extract both sections.
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print(f"user prompt: {prompt}")
print(f"model thinking: {think_content}")
print(f"model answer: {answer_content}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")
3) Writingpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "a-m-team/AM-Thinking-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)

prompt = "How can I find inner peace?"
messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=49152
)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist() 

response = tokenizer.decode(output_ids, skip_special_tokens=True)
think_content = response.split("<think>")[1].split("</think>")[0]
answer_content = response.split("<answer>")[1].split("</answer>")[0]

print (f"user prompt: {prompt}")
print (f"model thinking: {think_content}")
print (f"model answer: {answer_content}")

Deploy This Model

Production-ready deployment in minutes:

Together.ai: instant API access to this model. A production-ready inference API; start free and scale to millions of requests.

Replicate: one-click model deployment. Run models in the cloud through a simple API, with no DevOps required.

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.