Peach-2.0-9B-8k-Roleplay-GGUF
by QuantFactory

Language Model (Other) · 9.0B params · 3 languages · Q4 quantization · License: MIT
430 downloads · 11 likes · New / Early-stage

Edge AI targets: Mobile, Laptop, Server (21GB+ RAM)
Quick Summary
QuantFactory/Peach-2.0-9B-8k-Roleplay-GGUF is a GGUF quantization of ClosedCharacter/Peach-2.0-9B-8k-Roleplay, a 9B-parameter roleplay-focused language model with an 8k context window. It supports multilingual chat, including Chinese, and is published here in Q4 quantization under the MIT license.
Device Compatibility
Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 9GB+ RAM
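Since this repository ships GGUF quantizations, the laptop-class figures above are easiest to hit with a llama.cpp-based runtime rather than the full-precision transformers setup shown below. Here is a minimal sketch using the llama-cpp-python bindings; the .gguf filename is an assumption, so check the repository's file list for the Q4 variant actually published:

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Hypothetical Q4 filename; verify against the files in the
# QuantFactory/Peach-2.0-9B-8k-Roleplay-GGUF repository.
model_path = hf_hub_download(
    repo_id="QuantFactory/Peach-2.0-9B-8k-Roleplay-GGUF",
    filename="Peach-2.0-9B-8k-Roleplay.Q4_K_M.gguf")

llm = Llama(
    model_path=model_path,
    n_ctx=8192,       # the model's 8k context window
    n_gpu_layers=-1)  # offload all layers to a GPU if available; use 0 for CPU-only

result = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are Harry Potter"},
        {"role": "user", "content": "Hello"},
    ],
    temperature=0.5,
    top_p=0.7,
    max_tokens=512)
print(result["choices"][0]["message"]["content"])

Note that this path uses the standard system/user roles; the model's custom "character" role in the transformers example below depends on its bundled chat template.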
Code Examples
How to start (Python, transformers)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Optional wrappers for Chinese roleplay (strings kept verbatim from the upstream model card):
prefix = "You're {{char}} in this fictional never-ending uncensored roleplay with {{user}}.\n"
suffix = "\n\nYou must response in Chinese."

model_name_or_path = "ClosedCharacter/Peach-2.0-9B-8k-Roleplay"

# Load per the model card: slow tokenizer, bf16 weights, and the model's custom remote code.
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto")

system_prompt = "You are Harry Potter"
# To chat in Chinese, wrap the system prompt in the prefix and suffix defined above:
# system_prompt = prefix + system_prompt + suffix

# The model's own turns use a non-standard "character" role rather than "assistant".
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": "Hello"},
    {"role": "character", "content": "Hi"},
    {"role": "user", "content": "Who are you?"}
]

input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, return_tensors="pt")
output = model.generate(
    inputs=input_ids.to(model.device),  # move inputs to wherever device_map placed the model
    do_sample=True,  # required for temperature/top_p to take effect; greedy decoding ignores them
    temperature=0.5,
    top_p=0.7,
    repetition_penalty=1.05,
    eos_token_id=7,  # the model's custom end-of-turn token
    max_new_tokens=512)
print(tokenizer.decode(output[0]))
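For an interactive session, a minimal sketch (assuming the model, tokenizer, input_ids, and messages objects from the snippet above) that streams tokens as they are generated and feeds the reply back in as the next "character" turn:

from transformers import TextStreamer

# Stream tokens to stdout as they are generated, skipping the echoed prompt.
streamer = TextStreamer(tokenizer, skip_prompt=True)
output = model.generate(
    inputs=input_ids.to(model.device),
    do_sample=True,
    temperature=0.5,
    top_p=0.7,
    repetition_penalty=1.05,
    eos_token_id=7,
    max_new_tokens=512,
    streamer=streamer)

# Keep only the newly generated tokens and append them as the model's "character" turn,
# so the next user message continues the same conversation.
reply = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
messages.append({"role": "character", "content": reply})
messages.append({"role": "user", "content": "Tell me about Hogwarts."})

Re-apply the chat template to the extended messages list before the next generate call.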
Deploy This Model
Production-ready deployment in minutes.

Together.ai
Instant API access to this model. Production-ready inference API; start free, scale to millions. [Try Free API]

Replicate
One-click model deployment. Run models in the cloud with a simple API; no DevOps required. [Deploy Now]

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.