Qwen3-0.6B SFT Name Parser (YAML)
By: small-models-for-glam
Type: Language model
Parameters: 0.6B (600M)
Languages: 4
License: Apache-2.0
Downloads: 233
Status: New, early-stage
Edge AI: runs on mobile, laptop, or server hardware (2GB+ RAM)
Quick Summary
This model is a fine-tuned (SFT) version of Qwen/Qwen3-0.6B, trained to parse person name strings into structured YAML.
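For a quick smoke test before the full example below, the high-level pipeline API can run the model in a few lines. This is a minimal sketch, not part of the original card, and assumes a transformers version recent enough to accept chat-style message lists:

from transformers import pipeline

# Hypothetical quick check; the prompt format mirrors the Quick Start below
pipe = pipeline("text-generation", model="small-models-for-glam/Qwen3-0.6B-SFT-name-parser-yaml")
messages = [{"role": "user", "content": "Parse this person name:\n\nDr. Jane Smith-Jones, 1850-1920"}]
result = pipe(messages, max_new_tokens=512)
# The last message is the assistant reply; it may include a <think>...</think> block,
# since the pipeline does not disable thinking the way the Quick Start does
print(result[0]["generated_text"][-1]["content"])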
Device Compatibility
Device    Recommended
Mobile    4-6GB RAM
Laptop    16GB RAM
Server    GPU
Minimum recommended: 1GB+ RAM
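To fit the smaller memory budgets above, the model can be loaded with 4-bit weights. This is a sketch, not part of the original card; it assumes a CUDA GPU and the bitsandbytes package:

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "small-models-for-glam/Qwen3-0.6B-SFT-name-parser-yaml"
quant_config = BitsAndBytesConfig(load_in_4bit=True)  # roughly quarters weight memory vs. fp16
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=quant_config,  # assumption: bitsandbytes is installed
    device_map="auto",
)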
Code Examples
Quick Start (Python, transformers)
from transformers import AutoModelForCausalLM, AutoTokenizer
model_name = "small-models-for-glam/Qwen3-0.6B-SFT-name-parser-yaml"
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype="auto",
device_map="auto"
)
# Parse a person name
input_name = "Dr. Jane Smith-Jones, 1850-1920"
prompt = "Parse this person name:\n\n" + input_name
messages = [{"role": "user", "content": prompt}]
text = tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True,
enable_thinking=False
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
generated_ids = model.generate(**model_inputs, max_new_tokens=512)
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
# Strip any thinking block if present; with enable_thinking=False none is expected
try:
index = len(output_ids) - output_ids[::-1].index(151668) # </think> token
except ValueError:
index = 0
content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip()
print(content)
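Because the model emits YAML, content can be loaded back into a Python object for downstream use. A minimal sketch, not part of the original card, assuming PyYAML is installed and the model returns a single YAML mapping:

import yaml

# content comes from the Quick Start above; small models can emit malformed YAML, so guard the load
try:
    record = yaml.safe_load(content)
except yaml.YAMLError:
    record = None  # handle or log unparseable output
print(record)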
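The same loop extends to batches of names, which suits catalogue-scale processing. A sketch, not from the original card, reusing the model and tokenizer from the Quick Start; it assumes the tokenizer defines a pad token (Qwen3 tokenizers do) and uses left padding, as is standard for decoder-only batch generation:

names = ["Dr. Jane Smith-Jones, 1850-1920", "Brontë, Charlotte, 1816-1855"]  # illustrative inputs
tokenizer.padding_side = "left"  # pad on the left so generated tokens start at a common offset
texts = [
    tokenizer.apply_chat_template(
        [{"role": "user", "content": "Parse this person name:\n\n" + name}],
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False,
    )
    for name in names
]
batch = tokenizer(texts, return_tensors="pt", padding=True).to(model.device)
generated = model.generate(**batch, max_new_tokens=512)
for seq in generated:
    new_tokens = seq[batch.input_ids.shape[1]:]  # drop the (padded) prompt prefix
    print(tokenizer.decode(new_tokens, skip_special_tokens=True).strip())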