mergekit-dare_ties-nlzuacx

A Llama-architecture language model by mergekit-community
License: other · ≈70B parameters · arXiv:2306.01708 (TIES-Merging)
Status: new, early-stage · 1 download
Quick Summary

This is a merge of pre-trained Llama-3.3-70B-based language models, created with mergekit using the DARE-TIES merge method.

Device Compatibility

- Mobile: 4-6 GB RAM
- Laptop: 16 GB RAM
- Server: GPU

Minimum recommended: roughly 140 GB of RAM for the full bfloat16 weights (≈70B parameters × 2 bytes); quantized builds need far less, as estimated in the sketch below.
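As a rough cross-check on these figures, here is a minimal weights-only memory estimate (parameter count × bytes per parameter). The 70.6e9 parameter count is an assumption based on the Llama-3.3-70B base model, not a number stated on this page, and the estimate ignores KV cache and activation overhead.

```python
# Weights-only memory estimate for a ~70B-parameter model.
# ASSUMPTION: 70.6e9 parameters (Llama-3.3-70B class); real usage adds
# several GB for KV cache, activations, and framework overhead.

PARAMS = 70.6e9  # assumed parameter count

def weights_gb(bytes_per_param: float) -> float:
    """Approximate weight storage in gigabytes."""
    return PARAMS * bytes_per_param / 1e9

for label, bpp in [("bfloat16", 2.0), ("int8", 1.0), ("int4", 0.5)]:
    print(f"{label:>8}: ~{weights_gb(bpp):.0f} GB")
# bfloat16: ~141 GB, int8: ~71 GB, int4: ~35 GB
```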

Code Examples

Configuration (YAML)

```yaml
models:
  - model: nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
    parameters:
      density: [0.00, 0.00, 0.15, 0.75, 1.00]
      weight: [0.00, 0.00, 0.15, 0.75, 0.90]
  - model: EVA-UNIT-01/EVA-LLaMA-3.33-70B-v0.1
    parameters:
      density: [0.00, 0.00, 1.00, 0.50, 0.00]
      weight: [0.00, 0.00, 0.75, 0.35, 0.00]
merge_method: dare_ties
base_model: unsloth/Llama-3.3-70B-Instruct
parameters:
  normalize: true
  int8_mask: true
dtype: bfloat16
```
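The list-valued density and weight entries are layer gradients: mergekit spreads the anchor values evenly across the model's layers and linearly interpolates in between, so Nemotron contributes mostly in the deeper layers while EVA peaks around the middle of the stack. Below is a minimal sketch of that interpolation, not mergekit's actual code; the 80-layer count is an assumption based on the Llama-3.3-70B architecture, not a value from this page.

```python
import numpy as np

# Sketch of mergekit's documented handling of list-valued parameters:
# anchors are spaced evenly over layer depth and linearly interpolated.
N_LAYERS = 80  # ASSUMPTION: Llama-3.3-70B has 80 decoder layers

def layer_values(gradient: list[float], n_layers: int = N_LAYERS) -> np.ndarray:
    """Expand a mergekit layer gradient to one value per layer."""
    anchors = np.linspace(0.0, 1.0, num=len(gradient))  # anchor positions
    depth = np.linspace(0.0, 1.0, num=n_layers)         # relative layer depth
    return np.interp(depth, anchors, gradient)

nemotron_w = layer_values([0.00, 0.00, 0.15, 0.75, 0.90])
eva_w = layer_values([0.00, 0.00, 0.75, 0.35, 0.00])
print(nemotron_w[:3], nemotron_w[-3:])  # ~0 in early layers, ~0.9 in late layers
```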
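To reproduce the merge, save the configuration above as config.yaml and run it through mergekit. The sketch below follows the Python example in mergekit's README; option names can change between versions, and the output path is a placeholder. Merging three ~70B checkpoints needs hundreds of GB of disk and substantial RAM.

```python
import yaml

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

# Parse the DARE-TIES configuration shown above.
with open("config.yaml", "r", encoding="utf-8") as f:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(f))

run_merge(
    merge_config,
    out_path="./mergekit-dare_ties-nlzuacx",  # placeholder output directory
    options=MergeOptions(
        cuda=True,            # use a GPU for the tensor arithmetic
        copy_tokenizer=True,  # copy the base model's tokenizer to the output
        lazy_unpickle=True,   # lower peak RAM while reading checkpoints
    ),
)
```

Equivalently, mergekit's CLI entry point accepts the same file: mergekit-yaml config.yaml ./output-model-directory.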

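Usage

A minimal sketch of loading the merged model with Hugging Face transformers. The repository id is assumed from this page's title; at bfloat16 the weights need roughly 140 GB of GPU memory, so multi-GPU sharding via device_map="auto" (or a quantized build) is effectively required.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "mergekit-community/mergekit-dare_ties-nlzuacx"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,  # matches the merge's dtype
    device_map="auto",           # shard across available GPUs
)

messages = [{"role": "user", "content": "Summarize DARE-TIES merging in one line."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```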