L3-Rhaenys-8B

by tannedbum

Language Model (llama architecture), 8.0B parameters, 1 language
License: Other
New / Early-stage, 0 downloads
Edge AI targets: Mobile, Laptop, Server (18GB+ RAM)
Quick Summary

Farewell model for 3.0. Next, I'm going to wait for Sao10K to break the bank again with a new 3.1 RP base. This is a merge of pre-trained language models created using...

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 8GB+ RAM
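
To actually hit the mobile and laptop RAM figures above, an 8B model needs a quantized build rather than the full bfloat16 weights. Below is a minimal sketch using llama-cpp-python, assuming a GGUF quantization of this model is available locally; the file name is hypothetical and the ~6GB figure is a rough estimate for a Q4_K_M quant.

# Minimal sketch: run a quantized build of L3-Rhaenys-8B with llama-cpp-python.
# Assumes `pip install llama-cpp-python` and a local GGUF file (hypothetical name).
from llama_cpp import Llama

llm = Llama(
    model_path="L3-Rhaenys-8B.Q4_K_M.gguf",  # Q4_K_M is roughly a 5-6GB footprint
    n_ctx=8192,        # Llama 3 context window
    n_gpu_layers=-1,   # offload all layers if a GPU is present; set 0 for CPU-only
)

out = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful roleplay assistant."},
        {"role": "user", "content": "Introduce yourself in two sentences."},
    ],
    max_tokens=128,
)
print(out["choices"][0]["message"]["content"])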

Code Examples

SillyTavern sampler preset (text)
temp 0.9
top_k 30
top_p 0.75
min_p 0.2
rep_pen 1.1
smooth_factor 0.25
smooth_curve 1
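
The preset above targets SillyTavern's sampler panel. If you call the model directly, most of those values map onto Hugging Face transformers generation arguments; the sketch below assumes the weights are published as tannedbum/L3-Rhaenys-8B (an assumption), and note that smooth_factor/smooth_curve are SillyTavern-specific and have no direct transformers equivalent.

# Hedged sketch: map the SillyTavern preset onto transformers generation kwargs.
# The repo id is an assumption; adjust if the weights live elsewhere.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tannedbum/L3-Rhaenys-8B"  # assumed Hub location
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [
    {"role": "system", "content": "You are an expressive roleplay partner."},
    {"role": "user", "content": "Describe the view from the castle walls."},
]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(
    inputs,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.9,         # temp 0.9
    top_k=30,                # top_k 30
    top_p=0.75,              # top_p 0.75
    min_p=0.2,               # min_p 0.2
    repetition_penalty=1.1,  # rep_pen 1.1
)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))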
Configuration (yaml)

This is a two-stage SLERP merge: the first config blends Sao10K/L3-8B-Niitama-v1 with Sao10K/L3-8B-Stheno-v3.2, and the second blends the resulting tannedbum/L3-Niitama-Stheno-8B with princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2.
slices:
  - sources:
      - model: Sao10K/L3-8B-Niitama-v1
        layer_range: [0, 32]
      - model: Sao10K/L3-8B-Stheno-v3.2
        layer_range: [0, 32]
merge_method: slerp
base_model: Sao10K/L3-8B-Niitama-v1
parameters:
  t:
    - filter: self_attn
      value: [0.2, 0.4, 0.6, 0.2, 0.4]
    - filter: mlp
      value: [0.8, 0.6, 0.4, 0.8, 0.6]
    - value: 0.4
dtype: bfloat16


slices:
  - sources:
      - model: tannedbum/L3-Niitama-Stheno-8B
        layer_range: [0, 32]
      - model: princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2
        layer_range: [0, 32]
merge_method: slerp
base_model: tannedbum/L3-Niitama-Stheno-8B
parameters:
  t:
    - filter: self_attn
      value: [0.2, 0.4, 0.6, 0.2, 0.4]
    - filter: mlp
      value: [0.8, 0.6, 0.4, 0.8, 0.6]
    - value: 0.4
dtype: bfloat16
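
To reproduce a merge like this, the YAML above can be fed to mergekit's command-line entry point. A minimal sketch follows, assuming mergekit is installed (pip install mergekit) and each config block has been saved to a local file; the file and output paths are placeholders.

# Minimal sketch: drive a mergekit SLERP merge from Python via the mergekit-yaml CLI.
# Assumes `pip install mergekit`; paths below are placeholders.
import subprocess

CONFIG_PATH = "rhaenys-stage1.yml"        # the first YAML block above, saved to disk
OUTPUT_DIR = "./L3-Niitama-Stheno-8B"     # stage-1 output directory

subprocess.run(
    [
        "mergekit-yaml",
        CONFIG_PATH,
        OUTPUT_DIR,
        "--cuda",            # drop this flag to merge on CPU
        "--copy-tokenizer",  # carry the base model's tokenizer into the output
    ],
    check=True,
)
# Repeat with the second YAML block (pointing at the stage-1 output) to produce the final model.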
