llm-jp-3.1-13b
by llm-jp
Language Model · llama · 13B params · 2 languages · OTHER license
New · Early-stage · 616 downloads
Edge AI: Mobile · Laptop · Server (30GB+ RAM)
Quick Summary
llm-jp-3.1-13b is a 13B-parameter, llama-architecture language model from the llm-jp project, tagged for general and multilingual use.
Device Compatibility
- Mobile: 4-6GB RAM
- Laptop: 16GB RAM
- Server: GPU
Minimum recommended: 13GB+ RAM
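These tiers roughly track the memory needed just to hold 13B parameters at different numeric precisions. A back-of-envelope sketch (weights only; a real deployment also needs headroom for the KV cache and activations, which is why the header above lists 30GB+ for full-precision use):

# Rough weight-memory estimates for a 13B-parameter model.
PARAMS = 13e9

bytes_per_param = {
    "fp32": 4.0,       # full precision
    "bf16/fp16": 2.0,  # half precision (used in the code example below)
    "int8": 1.0,       # 8-bit quantized
    "int4": 0.5,       # 4-bit quantized
}

for precision, nbytes in bytes_per_param.items():
    gib = PARAMS * nbytes / 1024**3
    print(f"{precision:>9}: ~{gib:.0f} GiB of weights")

# Prints roughly: fp32 ~48, bf16/fp16 ~24, int8 ~12, int4 ~6 GiB --
# lining up with the 30GB+ server tier and the 4-6GB mobile tier above.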
Training Data Analysis
Good (6.0/10)
A review of the training datasets used by llm-jp-3.1-13b, with quality assessments.
Specialized For
- general
- multilingual

Training Datasets (1)
- c4 — rated 6/10 (general, multilingual)
Key Strengths
- Scale and Accessibility: 750GB of publicly available, filtered text
- Systematic Filtering: Documented heuristics make the pipeline reproducible
- Language Diversity: Despite being English-only, C4 captures a wide range of writing styles and domains

Considerations
- English-Only: Limits multilingual applications
- Filtering Limitations: Some offensive content and low-quality text remains despite filtering
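To inspect C4 firsthand, the filtered English split is mirrored on the Hugging Face Hub as allenai/c4. A minimal sketch using the datasets library in streaming mode, so the full 750GB corpus is never downloaded:

# Stream a few documents from the filtered English C4 split to see
# what the filtering heuristics let through. Requires: pip install datasets
from datasets import load_dataset

c4 = load_dataset("allenai/c4", "en", split="train", streaming=True)

for i, doc in enumerate(c4):
    print(doc["url"])               # source URL of the crawled page
    print(doc["text"][:200], "...")
    print("-" * 40)
    if i >= 2:                      # stop after a handful of samples
        break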
Explore our comprehensive training dataset analysis.
View All Datasets

Code Examples
Required Libraries and Their Versions
Python: torch, transformers

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model weights. device_map="auto" places the
# weights on available GPU(s); bfloat16 halves memory vs. float32.
tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-3.1-13b")
model = AutoModelForCausalLM.from_pretrained(
    "llm-jp/llm-jp-3.1-13b", device_map="auto", torch_dtype=torch.bfloat16
)

text = "自然言語処理とは何か"  # "What is natural language processing?"
tokenized_input = tokenizer.encode(
    text, add_special_tokens=False, return_tensors="pt"
).to(model.device)

with torch.no_grad():
    output = model.generate(
        tokenized_input,
        max_new_tokens=100,
        do_sample=True,           # sample instead of greedy decoding
        top_p=0.95,               # nucleus sampling cutoff
        temperature=0.7,
        repetition_penalty=1.05,  # mildly discourage repeated phrases
    )[0]
print(tokenizer.decode(output))
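If the bfloat16 footprint (roughly 24GB of weights) is too large for your hardware, 4-bit quantization is a common workaround. A minimal sketch, not part of the official llm-jp instructions, assuming a CUDA GPU and that bitsandbytes and accelerate are installed (expect some loss in output quality):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Quantize weights to 4 bits on load, cutting weight memory from
# roughly 24GB (bf16) to roughly 7GB; compute still runs in bf16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained("llm-jp/llm-jp-3.1-13b")
model = AutoModelForCausalLM.from_pretrained(
    "llm-jp/llm-jp-3.1-13b",
    quantization_config=bnb_config,
    device_map="auto",
)
# Generation then works exactly as in the example above.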
Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API

Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.