Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora
by codelion · Language Model · 0.5B params · 2 languages · license: apache-2.0
87 downloads · New · Early-stage
Edge AI: Mobile · Laptop · Server (2GB+ RAM)
Quick Summary
This LoRA adapter enhances Qwen/Qwen2.5-Coder-0.5B-Instruct to generate secure code by default; as the adapter name indicates, it was trained with GRPO (Group Relative Policy Optimization).
Device Compatibility
Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 1GB+ RAM
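
For the lower end of these RAM budgets, the base model can be loaded with 4-bit quantization before attaching the adapter. A minimal sketch, assuming the bitsandbytes and accelerate packages are installed and a CUDA device is available (this loading path is a general transformers/peft technique, not something specific to this adapter):

from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# NF4 4-bit quantization roughly quarters the base model's memory footprint
# (assumption: bitsandbytes is installed and a CUDA device is available)
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-Coder-0.5B-Instruct",
    quantization_config=bnb_config,
    device_map="auto"
)
# The LoRA adapter loads on top of the quantized base model exactly as in
# the full-precision example below
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")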
Code Examples
🔧 Usage (Python, transformers + peft)

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the base model and its tokenizer
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-Coder-0.5B-Instruct",
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")

# Load the security LoRA adapter on top of the base model
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")

# Generate secure code (move inputs to the model's device; enable sampling
# so the temperature setting takes effect)
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)
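
Because the base model is instruction-tuned, prompts are usually routed through the tokenizer's chat template rather than passed as raw text. A variant of the example above under that convention (the message content mirrors the prompt used earlier):

# Wrap the request as a chat message; the tokenizer applies Qwen's chat template
messages = [{
    "role": "user",
    "content": "Write a secure Python function: Create a user login function "
               "that checks username and password against a database",
}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(input_ids, max_new_tokens=512, do_sample=True, temperature=0.2)
# Decode only the newly generated tokens, skipping the echoed prompt
secure_code = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(secure_code)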
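
For deployment, the adapter can also be merged into the base weights so inference no longer depends on peft. A minimal sketch using PeftModel's merge_and_unload (the output directory name is illustrative):

# Fold the LoRA weights into the base model; the result is a plain
# transformers model that no longer needs peft at inference time
merged = model.merge_and_unload()
merged.save_pretrained("qwen2.5-coder-0.5b-security-merged")    # hypothetical output path
tokenizer.save_pretrained("qwen2.5-coder-0.5b-security-merged")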
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Load base model
model = AutoModelForCausalLM.from_pretrained(
"Qwen/Qwen2.5-Coder-0.5B-Instruct",
torch_dtype="auto",
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-0.5B-Instruct")
# Load security LoRA adapter
model = PeftModel.from_pretrained(model, "codelion/Qwen2.5-Coder-0.5B-Instruct-security-grpo-lora")
# Generate secure code
prompt = '''Write a secure Python function: Create a user login function
that checks username and password against a database'''
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.2)
secure_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(secure_code)🔧 Usagepythontransformers