rl-d20

by nanochat-students
License: apache-2.0
Quick Summary

This is the reinforcement-learning (RL) trained checkpoint from nanochat, Andrej Karpathy's full-stack project for training an LLM end to end (tokenizer, pretraining, finetuning, RL, and inference). The "d20" in the name denotes a depth of 20 Transformer layers, roughly 560M parameters.
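If you want the raw checkpoint files rather than going through transformers (for example, to load them with the nanochat code itself), the huggingface_hub client can mirror the repository locally. A minimal sketch; the local_dir path is just an illustration:

from huggingface_hub import snapshot_download

# Download every file in the model repo to a local directory.
# local_dir is a hypothetical example path; omit it to use the HF cache.
local_path = snapshot_download(
    repo_id="nanochat-students/rl-d20",
    local_dir="./rl-d20",
)
print(local_path)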

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
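For the laptop- and mobile-class budgets above, loading the weights in half precision roughly halves memory use relative to the default float32. A minimal sketch, assuming the accelerate package is installed so device_map="auto" can place layers automatically:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "nanochat-students/rl-d20"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# bfloat16 halves weight memory vs. float32; device_map="auto"
# (requires accelerate) spreads layers across available GPU/CPU memory.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model.eval()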

Code Examples

Usage (Python, transformers)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "nanochat-students/rl-d20"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# trust_remote_code=True is needed because the repository ships custom
# model/tokenizer code.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True).to(device)
model.eval()

# Render the conversation with the model's chat template, leaving the
# assistant turn open for generation.
conversation = [
    {"role": "user", "content": "Hello, who are you?"},
]
rendered = tokenizer.apply_chat_template(
    conversation,
    tokenize=False,
    add_generation_prompt=True,
)
model_inputs = tokenizer([rendered], return_tensors="pt").to(model.device)

# Generate, then decode only the newly produced tokens (drop the prompt).
generated = model.generate(**model_inputs, max_new_tokens=256)
output_ids = generated[0, model_inputs.input_ids.shape[1]:]
print(tokenizer.decode(output_ids, skip_special_tokens=True))
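For interactive use you may prefer sampled decoding with token-by-token streaming. A sketch building on the model and model_inputs from the example above; the sampling values are illustrative defaults, not settings tuned for this checkpoint:

from transformers import TextStreamer

# Stream tokens to stdout as they are generated, skipping the prompt.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(
    **model_inputs,
    max_new_tokens=256,
    do_sample=True,    # sample instead of greedy decoding
    temperature=0.8,   # illustrative value
    top_p=0.95,        # illustrative value
    streamer=streamer,
)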

Deploy This Model

Production-ready deployment in minutes.

Together.ai (Fastest API): instant API access to this model. Production-ready inference API; start free, scale to millions.

Replicate (Easiest Setup): one-click model deployment. Run models in the cloud with a simple API; no DevOps required.

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.