Shisa-v2-Mistral-Nemo-12B-Lorablated

2
12.0B
2 languages
by
yamatazen
Language Model
OTHER
12B params
New
0 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
27GB+ RAM
Mobile
Laptop
Server
Quick Summary

- Base Model: `shisa-ai/shisa-v2-mistral-nemo-12b`
- LoRA Adapter: `nbeerbower/Mistral-Nemo-12B-abliterated-LORA`

The model is saved in `bfloat16` format and is ready for deployment or fine-tuning.

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
12GB+ RAM

Code Examples

Code for LoRA merging (generated by Qwen3) — Python, using `transformers` and `peft`:
import argparse
import os
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

def main():
    """Merge a LoRA adapter into a base causal language model and save the result.

    Command-line entry point. Loads the base model in bfloat16, attaches the
    LoRA adapter via peft, folds the adapter weights into the base weights,
    then writes the merged model, the base model's tokenizer, and a short
    README.md to the output directory.

    CLI arguments (all required):
        --model_name: name or path of the base model.
        --lora_name: name or path of the LoRA adapter.
        --output_model_name: output directory for the merged model.
    """
    parser = argparse.ArgumentParser(description="Merge a LoRA into a large language model.")
    parser.add_argument("--model_name", type=str, required=True, help="Name or path of the base model.")
    parser.add_argument("--lora_name", type=str, required=True, help="Name or path of the LoRA adapter.")
    parser.add_argument("--output_model_name", type=str, required=True, help="Output directory for the merged model.")
    args = parser.parse_args()

    # Load the base model in bfloat16 (halves memory versus float32 and
    # matches the format the merged model is saved in).
    print("Loading base model...")
    base_model = AutoModelForCausalLM.from_pretrained(
        args.model_name,
        torch_dtype="bfloat16"
    )

    # Wrap the base model with the LoRA adapter weights.
    print("Loading LoRA adapter...")
    peft_model = PeftModel.from_pretrained(base_model, args.lora_name)

    # Fold the LoRA deltas into the base weights and strip the peft wrappers,
    # leaving a plain transformers model.
    print("Merging LoRA into base model...")
    merged_model = peft_model.merge_and_unload()

    # Persist the merged weights (save_pretrained creates the directory).
    print(f"Saving merged model to {args.output_model_name}...")
    merged_model.save_pretrained(args.output_model_name)

    # The tokenizer is taken from the base model; a LoRA adapter does not
    # carry its own tokenizer.
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    tokenizer.save_pretrained(args.output_model_name)

    # Create a README.md describing the merge inputs.
    print("Creating README.md...")
    readme_content = f"""# Merged Model

This model is a combination of:

- **Base Model**: `{args.model_name}`
- **LoRA Adapter**: `{args.lora_name}`

The model is saved in `bfloat16` format and is ready for deployment or fine-tuning.
"""

    readme_path = os.path.join(args.output_model_name, "README.md")
    # Fix: write with an explicit UTF-8 encoding. The default encoding of
    # open() is platform-dependent (e.g. cp1252 on Windows) and can mangle
    # or reject non-ASCII characters in model names.
    with open(readme_path, "w", encoding="utf-8") as f:
        f.write(readme_content)

    print("✅ Merging complete. Model saved to:", args.output_model_name)

if __name__ == "__main__":
    main()
Code for LoRA merging (generated by Qwen3) — Python, using `transformers` and `peft`:
import argparse
import os
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

def main():
    """CLI entry point: fold a LoRA adapter into its base model and save.

    Loads the base model in bfloat16, applies the LoRA adapter with peft,
    merges the adapter weights into the base weights, then writes the merged
    model, the base tokenizer, and a small README.md to the output directory.
    """
    arg_parser = argparse.ArgumentParser(description="Merge a LoRA into a large language model.")
    # All three flags are required string options; register them uniformly.
    for flag, help_text in (
        ("--model_name", "Name or path of the base model."),
        ("--lora_name", "Name or path of the LoRA adapter."),
        ("--output_model_name", "Output directory for the merged model."),
    ):
        arg_parser.add_argument(flag, type=str, required=True, help=help_text)
    cli = arg_parser.parse_args()

    # Base model first, in bfloat16.
    print("Loading base model...")
    base = AutoModelForCausalLM.from_pretrained(cli.model_name, torch_dtype="bfloat16")

    # Layer the adapter on top of the base weights.
    print("Loading LoRA adapter...")
    adapted = PeftModel.from_pretrained(base, cli.lora_name)

    # Bake the adapter deltas into the base weights.
    print("Merging LoRA into base model...")
    merged = adapted.merge_and_unload()

    # Write out the merged weights.
    print(f"Saving merged model to {cli.output_model_name}...")
    merged.save_pretrained(cli.output_model_name)

    # Tokenizer comes from the base model, saved alongside the weights.
    tokenizer = AutoTokenizer.from_pretrained(cli.model_name)
    tokenizer.save_pretrained(cli.output_model_name)

    # Emit a README.md recording what was merged.
    print("Creating README.md...")
    readme_body = f"""# Merged Model

This model is a combination of:

- **Base Model**: `{cli.model_name}`
- **LoRA Adapter**: `{cli.lora_name}`

The model is saved in `bfloat16` format and is ready for deployment or fine-tuning.
"""

    readme_target = os.path.join(cli.output_model_name, "README.md")
    with open(readme_target, "w") as readme_file:
        readme_file.write(readme_body)

    print("✅ Merging complete. Model saved to:", cli.output_model_name)

if __name__ == "__main__":
    main()
Code for LoRA merging (generated by Qwen3) — Python, using `transformers` and `peft`:
import argparse
import os
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

def main():
    """Merge a LoRA adapter into a base causal language model and save it.

    Command-line entry point. Loads the base model in bfloat16, attaches the
    LoRA adapter via peft, merges the adapter weights into the base weights,
    and writes the merged model, tokenizer, and a README.md to the output
    directory. All three CLI arguments are required.
    """
    parser = argparse.ArgumentParser(description="Merge a LoRA into a large language model.")
    parser.add_argument("--model_name", type=str, required=True, help="Name or path of the base model.")
    parser.add_argument("--lora_name", type=str, required=True, help="Name or path of the LoRA adapter.")
    parser.add_argument("--output_model_name", type=str, required=True, help="Output directory for the merged model.")
    args = parser.parse_args()

    # Load the base model in bfloat16 (half the memory of float32; matches
    # the format the merged model is later saved in).
    print("Loading base model...")
    base_model = AutoModelForCausalLM.from_pretrained(
        args.model_name,
        torch_dtype="bfloat16"
    )

    # Load the LoRA adapter and wrap it around the base model.
    print("Loading LoRA adapter...")
    peft_model = PeftModel.from_pretrained(base_model, args.lora_name)

    # Merge and unload the LoRA weights into the base model, leaving a
    # plain transformers model with the deltas baked in.
    print("Merging LoRA into base model...")
    merged_model = peft_model.merge_and_unload()

    # Save the merged model to the output directory.
    print(f"Saving merged model to {args.output_model_name}...")
    merged_model.save_pretrained(args.output_model_name)

    # Save the tokenizer from the base model — a LoRA adapter does not
    # carry its own tokenizer.
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    tokenizer.save_pretrained(args.output_model_name)

    # Create README.md file recording which base model and adapter were merged.
    print("Creating README.md...")
    readme_content = f"""# Merged Model

This model is a combination of:

- **Base Model**: `{args.model_name}`
- **LoRA Adapter**: `{args.lora_name}`

The model is saved in `bfloat16` format and is ready for deployment or fine-tuning.
"""

    # NOTE(review): no explicit encoding — this uses the platform default,
    # which may not be UTF-8 on Windows; consider encoding="utf-8".
    readme_path = os.path.join(args.output_model_name, "README.md")
    with open(readme_path, "w") as f:
        f.write(readme_content)

    print("✅ Merging complete. Model saved to:", args.output_model_name)

if __name__ == "__main__":
    main()

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.