NanoLM-0.3B-Instruct-v2
by Mxode
Language Model · OTHER · 0.3B params (300M) · 1 language · license: gpl-3.0
New · Early-stage · 4 downloads
Edge AI: Mobile, Laptop, Server (1GB+ RAM minimum)
Quick Summary
License GPL-3.0. Language English.
Device Compatibility
Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 1GB+ RAM
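
The usage example further down loads the model onto a CUDA GPU in bfloat16. For CPU-only laptops or small edge devices, a plain float32 CPU load is a reasonable fallback; this is a minimal sketch using the standard transformers from_pretrained API, not part of the original card (at ~0.3B parameters the weights take roughly 1.2GB of RAM in float32):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'

# CPU-only fallback: keep float32, since bfloat16 on CPU depends on the PyTorch build and hardware.
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float32).to('cpu')
tokenizer = AutoTokenizer.from_pretrained(model_path)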
Code Examples
How to use (Python, transformers)

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'

# Load the model in bfloat16 on the first CUDA GPU.
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)


def get_response(prompt: str, **kwargs):
    # Default sampling settings; each can be overridden via keyword arguments,
    # and any remaining kwargs are passed straight through to model.generate().
    generation_args = dict(
        max_new_tokens=kwargs.pop("max_new_tokens", 512),
        do_sample=kwargs.pop("do_sample", True),
        temperature=kwargs.pop("temperature", 0.7),
        top_p=kwargs.pop("top_p", 0.8),
        top_k=kwargs.pop("top_k", 40),
        **kwargs
    )

    # Wrap the prompt in the model's chat template.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    generated_ids = model.generate(model_inputs.input_ids, **generation_args)
    # Strip the prompt tokens so only the newly generated tokens are decoded.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response


prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""
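
Because get_response pops its defaults out of **kwargs, generation settings can be overridden per call and any extra keywords flow through to model.generate(). The calls below are illustrative (the prompts are hypothetical, not from the original card):

# Greedy decoding with a smaller output budget.
print(get_response("Summarize what a language model is in one sentence.",
                   do_sample=False, max_new_tokens=64))

# More conservative sampling for a short factual question.
print(get_response("Name three prime numbers greater than 10.",
                   temperature=0.3, top_p=0.9))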
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
"""How to usepythontransformers
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'
model = AutoModelForCausalLM.from_pretrained(model_path).to('cuda:0', torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_path)
def get_response(prompt: str, **kwargs):
generation_args = dict(
max_new_tokens = kwargs.pop("max_new_tokens", 512),
do_sample = kwargs.pop("do_sample", True),
temperature = kwargs.pop("temperature", 0.7),
top_p = kwargs.pop("top_p", 0.8),
top_k = kwargs.pop("top_k", 40),
**kwargs
)
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(model_inputs.input_ids, **generation_args)
generated_ids = [
output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]
response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
return response
prompt1 = "Calculate (4 - 1) * 7"
print(get_response(prompt1, do_sample=False))
"""
To calculate the expression (4 - 1) * 7, we need to follow the order of operations (PEMDAS):
1. Evaluate the expression inside the parentheses: 4 - 1 = 3
2. Multiply 3 by 7: 3 * 7 = 21
So, (4 - 1) * 7 = 21.
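The snippet above targets a CUDA GPU ('cuda:0', bfloat16). For the laptop and CPU tiers listed under Device Compatibility, the following is a minimal CPU-only sketch; the float32 dtype, greedy decoding, and max_new_tokens value are illustrative assumptions, not settings from the official card.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = 'Mxode/NanoLM-0.3B-Instruct-v2'

# Assumption: load in float32 on CPU; bfloat16 support on CPUs varies by hardware.
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float32)
tokenizer = AutoTokenizer.from_pretrained(model_path)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Calculate (4 - 1) * 7"},
]
# Build the chat-formatted prompt the same way as in the GPU example above.
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer([text], return_tensors="pt")

# Greedy decoding keeps the CPU run short and deterministic (assumed setting).
output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=False)

# Decode only the newly generated tokens, skipping the prompt.
answer = tokenizer.decode(output_ids[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
print(answer)
On a machine with a supported GPU, the from_pretrained call can keep the .to('cuda:0', torch.bfloat16) form shown earlier; the rest of the pipeline is unchanged.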
"""Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free APIReplicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy NowDisclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.