Foundation-Sec-8B-Instruct

by fdtn-ai

Language Model (Llama architecture, 8.0B parameters)
Languages: 1
License: OTHER
Downloads: 9.4K
Likes: 53
Status: New, early-stage
Edge AI targets: Mobile, Laptop, Server (18GB+ RAM)
Quick Summary

Foundation-Sec-8B-Instruct is an 8-billion-parameter, Llama-based language model instruction-tuned for cybersecurity tasks, such as answering questions about vulnerabilities and mapping CVEs to their corresponding CWEs.

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU

Minimum recommended: 8GB+ RAM
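
To fit the tighter mobile and laptop tiers above, one option is 4-bit quantized loading through the transformers bitsandbytes integration. This is a minimal sketch, assuming the optional bitsandbytes and accelerate packages are installed and CUDA-capable hardware is available; the memory figures in the comments are rough estimates, not measurements.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# 4-bit NF4 quantization: shrinks the ~16GB full-precision footprint to
# roughly 5-6GB of weights (estimate; actual usage varies by device)
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained(
    "fdtn-ai/Foundation-Sec-8B-Instruct",
    quantization_config=quant_config,
    device_map="auto",  # requires accelerate
)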

Code Examples

Import the required libraries (Python, transformers)
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

# Example security question: map a CVE to its corresponding CWE
prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

# Render the chat template to a string, then tokenize it without adding
# special tokens a second time
model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)

# do_sample=True is required for the low temperature to take effect;
# without it, generate() ignores the temperature setting
output = model.generate(**inputs, do_sample=True, temperature=0.1, max_new_tokens=250)

# Decode the output and strip the echoed prompt from the response
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
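
For GPU inference, a minimal sketch along the same lines, assuming a CUDA-capable device and the accelerate package (the CWE-117 prompt below is just an illustrative placeholder), loads the weights in bfloat16 to roughly halve memory use versus float32:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained(
    "fdtn-ai/Foundation-Sec-8B-Instruct",
    torch_dtype=torch.bfloat16,  # half-precision weights
    device_map="auto",           # places layers on the available GPU(s)
)

# Hypothetical example prompt, for illustration only
messages = [{"role": "user", "content": "Summarize CWE-117 in one sentence."}]
model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Move the tokenized inputs to the same device as the model
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False).to(model.device)

output = model.generate(**inputs, do_sample=True, temperature=0.1, max_new_tokens=250)
print(tokenizer.batch_decode(output)[0].replace(model_inputs, ""))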
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))
Import the required librariespythontransformers
# Import the required libraries
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("fdtn-ai/Foundation-Sec-8B-Instruct")

prompt = "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"

messages = [
    {"role": "user", "content": prompt}
]

model_inputs = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(model_inputs, return_tensors="pt", add_special_tokens=False)
output = model.generate(**inputs, temperature=0.1, max_new_tokens=250)
resp = tokenizer.batch_decode(output)[0]
print(resp.replace(model_inputs, ""))

Deploy This Model

Production-ready deployment in minutes.

Together.ai (Fastest API): Instant API access to this model. Production-ready inference API; start free, scale to millions. Try Free API.

Replicate (Easiest Setup): One-click model deployment. Run models in the cloud with a simple API, no DevOps required. Deploy Now.
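
For hosted inference, the request shape is typically OpenAI-compatible rather than raw transformers calls. A minimal sketch, assuming Together.ai's https://api.together.xyz/v1 endpoint and that the provider lists the model under the id fdtn-ai/Foundation-Sec-8B-Instruct (both are assumptions; check the provider's model catalog first):

# Minimal sketch of a hosted-API call via the OpenAI-compatible client.
# The base_url and model id below are assumptions; verify them with the provider.
from openai import OpenAI

client = OpenAI(
    base_url="https://api.together.xyz/v1",  # assumed OpenAI-compatible endpoint
    api_key="YOUR_API_KEY",                  # placeholder; use your own key
)

resp = client.chat.completions.create(
    model="fdtn-ai/Foundation-Sec-8B-Instruct",  # assumed hosted model id
    messages=[
        {"role": "user", "content": "CVE-2015-10011 is a vulnerability about OpenDNS OpenResolve improper log output neutralization. What is the corresponding CWE?"}
    ],
    temperature=0.1,
    max_tokens=250,
)
print(resp.choices[0].message.content)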

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.