Zamba2-1.2B-instruct
1.2B params
GPT-3 class
license: apache-2.0
by Zyphra
Language Model
54K downloads
Community-tested
Edge AI: Mobile, Laptop, Server (3GB+ RAM)
Quick Summary
Zamba2-1.2B-instruct is obtained from Zamba2-1.2B by fine-tuning on instruction-following and chat datasets. Specifically: 1. SFT of the base Zamba2-1.2B model...
Device Compatibility
Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum Recommended: 2GB+ RAM
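As a rough sanity check on these figures (our own arithmetic, not from the model card): 1.2B parameters stored in bfloat16 take about 2 bytes each, so the weights alone occupy roughly 2.4 GB. The 3GB+ and 4-6GB figures leave headroom for activations and runtime overhead, while the 2GB+ minimum presumably corresponds to a quantized build of the weights.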
Code Examples
Inference (Python, transformers)
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print(tokenizer.decode(outputs[0]))
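The example above assumes a CUDA GPU. For the laptop case listed under Device Compatibility, the following is a minimal CPU-only sketch of the same flow; it is our adaptation rather than code from the model card, loads the weights in float32 (the default when no dtype is given), and will be noticeably slower than GPU inference since the fused Mamba kernels are not used.

from transformers import AutoTokenizer, AutoModelForCausalLM
# Load tokenizer and model; with no device_map the model stays on CPU in float32
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
# Same chat-template flow as the GPU example, just without moving tensors to "cuda"
prompt = "What factors contributed to the fall of the Roman Empire?"
chat_sample = tokenizer.apply_chat_template([{'role': 'user', 'content': prompt}], tokenize=False)
inputs = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False)
outputs = model.generate(**inputs, max_new_tokens=150, do_sample=False)
print(tokenizer.decode(outputs[0]))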
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Instantiate model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-1.2B-instruct")
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-1.2B-instruct", device_map="cuda", torch_dtype=torch.bfloat16)
# Format the input as a chat template
prompt = "What factors contributed to the fall of the Roman Empire?"
sample = [{'role': 'user', 'content': prompt}]
chat_sample = tokenizer.apply_chat_template(sample, tokenize=False)
# Tokenize input and generate output
input_ids = tokenizer(chat_sample, return_tensors='pt', add_special_tokens=False).to("cuda")
outputs = model.generate(**input_ids, max_new_tokens=150, return_dict_in_generate=False, output_scores=False, use_cache=True, num_beams=1, do_sample=False)
print((tokenizer.decode(outputs[0])))Inferencepythontransformers
Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API
Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now
Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.
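For hosted deployment, providers such as Together.ai expose an OpenAI-compatible chat endpoint, so the local example above maps onto a single API call. A minimal sketch, assuming the model is listed in the provider's catalog under the identifier "Zyphra/Zamba2-1.2B-instruct" (verify the exact name) and that you have an API key:
# Hypothetical hosted-inference call via an OpenAI-compatible endpoint.
# The base_url, model identifier, and availability of this model are assumptions;
# consult the provider's documentation before relying on them.
from openai import OpenAI

client = OpenAI(base_url="https://api.together.xyz/v1", api_key="YOUR_API_KEY")
resp = client.chat.completions.create(
    model="Zyphra/Zamba2-1.2B-instruct",  # assumed identifier; check the provider catalog
    messages=[{"role": "user", "content": "What factors contributed to the fall of the Roman Empire?"}],
    max_tokens=150,
)
print(resp.choices[0].message.content)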