Hunyuan-A13B-Instruct-GGUF

by unsloth
13B params (BF16) · License: Other · 2K downloads · New · Early-stage
Edge AI targets: Mobile · Laptop · Server (30GB+ RAM)
Quick Summary

GGUF quantizations of tencent/Hunyuan-A13B-Instruct, Tencent's instruction-tuned Mixture-of-Experts model with 13B active parameters, packaged by Unsloth for local inference.

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 13GB+ RAM
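
To pull a single quantization locally, huggingface_hub's snapshot_download can filter by filename pattern. A minimal sketch; the Q4_K_M pattern below is illustrative, so check the repo's file list for the quants actually published:

from huggingface_hub import snapshot_download

# Download only files matching the (illustrative) Q4_K_M pattern.
snapshot_download(
    repo_id="unsloth/Hunyuan-A13B-Instruct-GGUF",
    allow_patterns=["*Q4_K_M*.gguf"],
    local_dir="./Hunyuan-A13B-Instruct-GGUF",
)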

Code Examples

model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

# Point MODEL_PATH at a local checkout, or fall back to the Hub ID.
model_name_or_path = os.environ.get("MODEL_PATH", "tencent/Hunyuan-A13B-Instruct")

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    device_map="auto",    # spread layers across available GPUs/CPU
    torch_dtype="auto",   # load in the checkpoint's native dtype (bfloat16)
    trust_remote_code=True,
)

messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

# enable_thinking=True asks the chat template to emit <think>...</think>
# reasoning before the final <answer>...</answer>.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    enable_thinking=True,
)

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)  # generate() does not accept this key
outputs = model.generate(**model_inputs, max_new_tokens=4096)

output_text = tokenizer.decode(outputs[0])

# Split the generation into its reasoning and answer segments.
think_matches = re.findall(r"<think>(.*?)</think>", output_text, re.DOTALL)
answer_matches = re.findall(r"<answer>(.*?)</answer>", output_text, re.DOTALL)

# Guard against missing tags instead of indexing blindly.
think_content = think_matches[0].strip() if think_matches else ""
answer_content = answer_matches[0].strip() if answer_matches else output_text.strip()

print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
model_name_or_path = "tencent/Hunyuan-A13B-Instruct"pythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import os
import re

model_name_or_path = os.environ['MODEL_PATH']
# model_name_or_path = "tencent/Hunyuan-A13B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto",trust_remote_code=True)  # You may want to use bfloat16 and/or move to GPU here
messages = [
    {"role": "user", "content": "Write a short summary of the benefits of regular exercise"},
]

text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            enable_thinking=True
            )

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=4096)


output_text = tokenizer.decode(outputs[0])

think_pattern = r'<think>(.*?)</think>'
think_matches = re.findall(think_pattern, output_text, re.DOTALL)

answer_pattern = r'<answer>(.*?)</answer>'
answer_matches = re.findall(answer_pattern, output_text, re.DOTALL)

think_content = [match.strip() for match in think_matches][0]
answer_content = [match.strip() for match in answer_matches][0]
print(f"thinking_content:{think_content}\n\n")
print(f"answer_content:{answer_content}\n\n")
Back in the transformers flow, the reasoning trace can be turned off by passing enable_thinking=False to apply_chat_template; the rest of the pipeline is unchanged:

text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    enable_thinking=False,  # suppresses the <think> reasoning block
)
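For completeness, here is a sketch of the full non-thinking round trip, reusing the tokenizer, model, and messages loaded above. The assumption that no <think> block appears when enable_thinking=False is ours, so the code falls back to the raw decoded output if the <answer> tag is absent.

# Non-thinking round trip -- reuses tokenizer, model, and messages from above.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    enable_thinking=False,
)

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
model_inputs.pop("token_type_ids", None)
outputs = model.generate(**model_inputs, max_new_tokens=1024)
output_text = tokenizer.decode(outputs[0])

# Assumption: with thinking disabled, only an <answer> block is emitted.
answer_matches = re.findall(r'<answer>(.*?)</answer>', output_text, re.DOTALL)
print(answer_matches[0].strip() if answer_matches else output_text)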
