OpenCoder-1.5B-Base

Language Model (llama architecture) · 1.5B params · by infly
License: OTHER · 157 downloads · New · Early-stage

Edge AI: Mobile · Laptop · Server · 4GB+ RAM
Quick Summary

OpenCoder-1.5B-Base is a 1.5B-parameter base language model from infly, built on the llama architecture and specialized for code generation and completion.

Device Compatibility

Mobile:  4-6GB RAM
Laptop:  16GB RAM
Server:  GPU
Minimum recommended: 2GB+ RAM
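
To fit the tighter mobile and minimum-RAM budgets, one common option is 4-bit quantized loading via bitsandbytes. The sketch below is a minimal example, assuming a CUDA-capable device and the bitsandbytes package; actual memory use depends on context length and runtime overhead:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "infly/OpenCoder-1.5B-Base"

# 4-bit NF4 quantization via bitsandbytes (requires a CUDA GPU and the
# bitsandbytes package); cuts weight memory to roughly a quarter of bf16.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=quant_config,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

At 4-bit precision the 1.5B weights occupy roughly 0.75-1GB, which is what makes the 2GB+ minimum plausible.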

Code Examples

Inference (Python, transformers)

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "infly/OpenCoder-1.5B-Base"

# Load the weights in bfloat16 and let accelerate place them on the
# available device(s); trust_remote_code mirrors the upstream example.
model = AutoModelForCausalLM.from_pretrained(model_name,
                                             torch_dtype=torch.bfloat16,
                                             device_map="auto",
                                             trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# As a base (non-instruct) model, OpenCoder expects completion-style
# prompts, e.g. a code comment to continue from.
inputs = tokenizer("# write a quick sort algorithm", return_tensors="pt")
outputs = model.generate(**inputs.to(model.device), max_new_tokens=128)

result = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(result)
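
Generation above is greedy by default; for more varied completions you can enable sampling. A minimal variation reusing the objects from the snippet above (the temperature and top-p values are illustrative, not tuned recommendations for this model):

# Sampled decoding: reuses `model`, `tokenizer`, and `inputs` from above.
outputs = model.generate(
    **inputs.to(model.device),
    max_new_tokens=128,
    do_sample=True,     # stochastic decoding instead of greedy
    temperature=0.7,    # illustrative value
    top_p=0.95,         # nucleus sampling cutoff
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))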

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API
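
If you go the hosted-API route, access is typically through an OpenAI-compatible endpoint. A minimal sketch using the openai client pointed at Together's endpoint; whether this exact model ID is listed in Together's catalog is an assumption you should verify:

import os
from openai import OpenAI

# Together exposes an OpenAI-compatible API; export TOGETHER_API_KEY first.
client = OpenAI(
    base_url="https://api.together.xyz/v1",
    api_key=os.environ["TOGETHER_API_KEY"],
)

# Base models take plain-text completion requests, not chat messages.
# The model ID below is an assumption -- confirm it in Together's catalog.
response = client.completions.create(
    model="infly/OpenCoder-1.5B-Base",
    prompt="# write a quick sort algorithm\n",
    max_tokens=128,
)
print(response.choices[0].text)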

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now
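
On Replicate, the Python client is a single call once a deployment exists. The model slug and input keys below are hypothetical placeholders; substitute the actual values from whichever OpenCoder deployment you use:

import replicate  # pip install replicate; export REPLICATE_API_TOKEN

# Hypothetical slug -- search replicate.com for an OpenCoder deployment
# (or publish your own) and use its real owner/name identifier.
output = replicate.run(
    "your-username/opencoder-1.5b-base",
    input={"prompt": "# write a quick sort algorithm\n", "max_new_tokens": 128},
)
# Language models on Replicate usually stream output as an iterator of strings.
print("".join(output))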

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.