Gpt Oss Coder 20b
by yasserrmd
Language Model · OTHER · 20.0B params · 1 language · license: apache-2.0
311 downloads · New · Early-stage
Edge AI: Mobile · Laptop · Server · 45GB+ RAM
Quick Summary
This model is a fine-tuned version of OpenAI's GPT-OSS-20B, optimized for code generation tasks.
Device Compatibility
Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum Recommended: 19GB+ RAM
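On hardware near the lower end of these figures, a common first step is to let transformers keep the checkpoint's native precision and spread layers across whatever devices are available. The snippet below is a minimal sketch of that pattern, not an official loading recipe for this model; it assumes the accelerate package is installed, and actual memory use will vary with the hardware.

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("yasserrmd/gpt-oss-coder-20b")

# torch_dtype="auto" keeps the checkpoint's stored precision instead of upcasting to fp32;
# device_map="auto" (requires accelerate) fills GPU memory first and spills the rest to CPU RAM.
model = AutoModelForCausalLM.from_pretrained(
    "yasserrmd/gpt-oss-coder-20b",
    torch_dtype="auto",
    device_map="auto",
)

With most layers offloaded to CPU the model still runs, but generation is substantially slower than on a GPU that holds the full set of weights.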
Code Examples
Usage Example (Python, transformers)
from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer

# Load the fine-tuned tokenizer and model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("yasserrmd/gpt-oss-coder-20b")
model = AutoModelForCausalLM.from_pretrained("yasserrmd/gpt-oss-coder-20b")

# Build a chat-style prompt for the coding assistant
messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},
    {"role": "user", "content": "Using Python to connect MySQL and retrieve table 'employee' where empno is 1234."},
]

# Apply the model's chat template; reasoning_effort is a GPT-OSS-specific template option
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
    return_dict=True,
    reasoning_effort="low",
).to(model.device)

# Stream generated tokens to stdout as they are produced
streamer = TextStreamer(tokenizer)
_ = model.generate(**inputs, max_new_tokens=512, streamer=streamer)
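The same request can also go through the high-level pipeline API, which applies the chat template and decoding internally. This is a sketch assuming a recent transformers release that accepts chat-style message lists; the reasoning_effort option used above is omitted here.

from transformers import pipeline

# The text-generation pipeline wraps tokenization, chat templating, generation, and decoding.
generator = pipeline(
    "text-generation",
    model="yasserrmd/gpt-oss-coder-20b",
    torch_dtype="auto",
    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},
    {"role": "user", "content": "Using Python to connect MySQL and retrieve table 'employee' where empno is 1234."},
]

outputs = generator(messages, max_new_tokens=512)
# For chat inputs, generated_text is the full message list; the last entry is the assistant reply.
print(outputs[0]["generated_text"][-1]["content"])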
Deploy This Model
Production-ready deployment in minutes.

Together.ai: instant API access to this model. Production-ready inference API. Start free, scale to millions. (Try Free API)

Replicate: one-click model deployment. Run models in the cloud with a simple API. No DevOps required. (Deploy Now)

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.