LinearWriter-12B
by yamatazen

Language Model
License: other
12B parameters
2 languages (English, Japanese)
20 downloads
New, early-stage release

Edge AI: targets mobile, laptop, and server hardware; plan for 27GB+ RAM to run the unquantized bfloat16 weights.
Quick Summary

LinearWriter-12B is a linear (weighted-average) merge of four Mistral-Nemo-12B fine-tunes aimed at creative writing in English and Japanese: an abliterated (uncensored) instruct model anchors the merge at full weight, with a Gutenberg-trained writing model and two Japanese-focused models blended in at layer-dependent weights.
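
To try it, the model should load like any Hugging Face causal LM. A minimal sketch: the hub id is inferred from the page title and author, and a chat template is assumed to ship with the repository, so verify both before relying on this.

Loading and generation (Python)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hub id inferred from the page title and author; verify before use.
MODEL_ID = "yamatazen/LinearWriter-12B"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,  # matches the merge's out_dtype
    device_map="auto",           # shards across available GPUs/CPU RAM
)

messages = [{"role": "user",
             "content": "Write a short scene set in a rainy Tokyo alley."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=300,
                        do_sample=True, temperature=0.8)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))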

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 12GB+ RAM
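
These figures follow from weight precision: a 12B-parameter model needs about 2 bytes per parameter in bfloat16, 1 byte in int8, and half a byte at 4-bit quantization, which is how one model spans mobile to server. A rough estimate in Python (the 1.2x overhead factor for activations and KV cache is an illustrative assumption, not a measured number):

Memory estimate (Python)
# Back-of-envelope RAM needs for a 12B-parameter model at common precisions.
PARAMS = 12e9
OVERHEAD = 1.2  # assumed allowance for activations, KV cache, runtime buffers

for precision, bytes_per_param in [("bfloat16", 2.0), ("int8", 1.0), ("int4", 0.5)]:
    weights_gb = PARAMS * bytes_per_param / 1024**3
    print(f"{precision:>8}: weights ~{weights_gb:4.1f} GB, "
          f"with overhead ~{weights_gb * OVERHEAD:4.1f} GB")

bfloat16 comes out near the 27GB+ figure in the header, int8 near the 12GB minimum, and 4-bit near the mobile range.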

Code Examples

Configuration (mergekit YAML)
merge_method: linear
dtype: bfloat16
out_dtype: bfloat16
models:
  - model: natong19/Mistral-Nemo-Instruct-2407-abliterated # Uncensor
    parameters:
      weight: 1.0
  - model: nbeerbower/mistral-nemo-gutenberg-12B-v4 # Writing
    parameters:
      weight: [0.25, 0.3, 0.5, 0.6, 0.75]
  - model: Elizezen/Himeyuri-v0.1-12B # Japanese
    parameters:
      weight: [0.25, 0.3, 0.6, 0.3, 0.25]
  - model: shisa-ai/shisa-v2-mistral-nemo-12b # Japanese
    parameters:
      weight: [0.25, 0.3, 0.5, 0.3, 0.25]
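
The scalar weight 1.0 keeps the abliterated instruct model at full strength in every layer, while the bracketed lists are mergekit weight gradients: the anchor values are interpolated across the transformer layers, so the writing model's influence, for example, ramps from 0.25 in the earliest layers to 0.75 in the last. mergekit's linear method also normalizes weights per parameter by default, so the lists describe relative rather than absolute contributions. A minimal sketch of both mechanics in plain Python (not mergekit internals, and the 40-layer depth is an assumption based on Mistral-Nemo's architecture):

Gradient weights and linear merge (Python)
import numpy as np

def gradient_weights(anchors, num_layers):
    """Interpolate a mergekit-style weight gradient across transformer layers."""
    layer_pos = np.linspace(0.0, 1.0, num_layers)
    anchor_pos = np.linspace(0.0, 1.0, len(anchors))
    return np.interp(layer_pos, anchor_pos, anchors)

def linear_merge(tensors, weights, normalize=True):
    """Weighted average of the same parameter tensor taken from each model."""
    merged = sum(w * t for w, t in zip(weights, tensors))
    return merged / sum(weights) if normalize else merged

# Per-layer weight for the Gutenberg writing model, assuming 40 transformer
# layers (Mistral-Nemo's depth; an assumption, check the model config).
print(gradient_weights([0.25, 0.3, 0.5, 0.6, 0.75], num_layers=40).round(3))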
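
To reproduce the merge, save the recipe above as config.yaml and run it through mergekit's mergekit-yaml entry point (pip install mergekit). A sketch; the output directory name is illustrative:

Running the merge (Python)
import subprocess

# mergekit-yaml <config> <output-dir> is mergekit's CLI entry point.
# --cuda runs the merge arithmetic on GPU; drop it to merge on CPU.
subprocess.run(
    ["mergekit-yaml", "config.yaml", "./LinearWriter-12B-merged", "--cuda"],
    check=True,
)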
