SeaLLM-7B-v2.5
by SeaLLMs

Language Model, GPT-3 class, 7.0B params, 11 languages, 8K context
License: OTHER, 16.4K downloads, 50 likes, Fair, Community-tested
Edge AI: Mobile, Laptop, Server (16GB+ RAM)
Quick Summary

SeaLLM-7B-v2.5 is a 7.0B-parameter multilingual language model from the SeaLLMs team, covering 11 languages with a focus on Southeast Asian languages.

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 7GB+ RAM
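
For the tighter budgets above (mobile-class RAM), the full-precision weights generally will not fit and some form of quantization is needed. The snippet below is a minimal sketch of a 4-bit load through transformers with bitsandbytes; it assumes bitsandbytes is installed and a CUDA GPU is available, and actual memory use still varies with context length.

# Sketch: 4-bit quantized load to reduce memory footprint (assumes bitsandbytes is installed).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # store weights in 4-bit NF4
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute in bfloat16
)

model = AutoModelForCausalLM.from_pretrained(
    "SeaLLMs/SeaLLM-7B-v2.5",
    quantization_config=quant_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")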

Code Examples

Using transformers's chat_template (Python)

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
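
Note that batch_decode above returns the full sequence, prompt included. To print only the model's reply, a common pattern is to slice off the prompt tokens before decoding; this is a small addition to the example above, not something SeaLLM-specific:

# Decode only the newly generated tokens, skipping the prompt and special tokens.
prompt_len = model_inputs.shape[1]
reply_ids = generated_ids[:, prompt_len:]
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])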
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
Using transformers's chat_templatepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" # the device to load the model onto

# use bfloat16 to ensure the best performance.
model = AutoModelForCausalLM.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5", torch_dtype=torch.bfloat16, device_map=device)
tokenizer = AutoTokenizer.from_pretrained("SeaLLMs/SeaLLM-7B-v2.5")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello world"},
    {"role": "assistant", "content": "Hi there, how can I help you today?"},
    {"role": "user", "content": "Explain general relativity in details."}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)
print(tokenizer.convert_ids_to_tokens(encodeds[0]))

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.pad_token_id)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])

Deploy This Model

Production-ready deployment in minutes.

Together.ai (Fastest API)
Instant API access to this model. Production-ready inference API: start free, scale to millions of requests.

Replicate (Easiest Setup)
One-click model deployment. Run models in the cloud with a simple API; no DevOps required.

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.
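
Hosted providers such as these typically expose an OpenAI-compatible chat endpoint. The sketch below is illustrative only: the base URL, API key, and model identifier are placeholders, not confirmed values; replace them with whatever your provider's dashboard lists for SeaLLM-7B-v2.5.

# Illustrative sketch of calling a hosted deployment through an OpenAI-compatible endpoint.
# The base_url, api_key, and model name are placeholders (assumptions, not provider-confirmed).
from openai import OpenAI

client = OpenAI(
    base_url="https://api.example-provider.com/v1",  # placeholder: your provider's endpoint
    api_key="YOUR_API_KEY",                          # placeholder: your provider's API key
)

response = client.chat.completions.create(
    model="SeaLLMs/SeaLLM-7B-v2.5",  # placeholder: use the model ID your provider lists
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain general relativity in detail."},
    ],
    max_tokens=512,
)
print(response.choices[0].message.content)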