DeepSeek-V3.2-Speciale-GGUF
ik_llama.cpp quants by ubergarm
Language Model
Quick Summary
GGUF quantizations of DeepSeek-V3.2-Speciale produced with ik_llama.cpp, offered at several size/quality points (IQ5_K ~464 GiB, IQ3_K ~291 GiB, smol-IQ1_KT ~146 GiB) and intended for hybrid GPU+CPU or CPU-only inference.
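To pull one of these quants locally, something like the following should work; this assumes the files are published under ubergarm/DeepSeek-V3.2-Speciale-GGUF on Hugging Face and that the split GGUF filenames contain the quant name, so adjust the --include pattern to the actual file names:

# download only the IQ3_K split files into a local directory
huggingface-cli download ubergarm/DeepSeek-V3.2-Speciale-GGUF \
  --include "*IQ3_K*" \
  --local-dir /mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF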
Code Examples
IQ5_K 464.467 GiB (5.946 BPW)
#!/usr/bin/env bash
custom="
## Attention [0-60] (GPU)
# attn_kv_b is only used for PP so keep it q8_0 for best speed and accuracy
blk\..*\.attn_kv_b\.weight=q8_0
# ideally k_b and v_b would be smaller than q8_0 as they are used for TG with -mla 3
# https://github.com/ikawrakow/ik_llama.cpp/issues/651
# blk.*.attn_k_b.weight is not divisible by 256 so only supports iq4_nl or legacy qN_0
blk\..*\.attn_k_b\.weight=q8_0
blk\..*\.attn_v_b\.weight=q8_0
# Balance of attn tensors
blk\..*\.attn_kv_a_mqa\.weight=q8_0
blk\..*\.attn_q_a\.weight=q8_0
blk\..*\.attn_q_b\.weight=q8_0
blk\..*\.attn_output\.weight=q8_0
## First Three Dense Layers [0-2] (GPU)
blk\..*\.ffn_down\.weight=q8_0
blk\..*\.ffn_(gate|up)\.weight=q8_0
## Shared Expert [3-60] (GPU)
blk\..*\.ffn_down_shexp\.weight=q8_0
blk\..*\.ffn_(gate|up)_shexp\.weight=q8_0
## Routed Experts [3-60] (CPU)
blk\..*\.ffn_down_exps\.weight=iq6_k
blk\..*\.ffn_(gate|up)_exps\.weight=iq5_k
## Token embedding and output tensors (GPU)
token_embd\.weight=q8_0
output\.weight=q8_0
"
custom=$(
echo "$custom" | grep -v '^#' | \
sed -Ez 's:\n+:,:g;s:,$::;s:^,::'
)
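# NUMA node to bind to (assumed example; set to the socket you want to use)
SOCKET=0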
numactl -N ${SOCKET} -m ${SOCKET} \
./build/bin/llama-quantize \
--custom-q "$custom" \
--imatrix /mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/imatrix-DeepSeek-V3.2-Speciale-Q8_0.dat \
/mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/DeepSeek-V3.2-Speciale-256x20B-safetensors-BF16-00001-of-00030.gguf \
/mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/DeepSeek-V3.2-Speciale-IQ5_K.gguf \
IQ5_K \
128

IQ3_K 290.897 GiB (3.724 BPW)
#!/usr/bin/env bash
custom="
## Attention [0-60] (GPU)
blk\..*\.attn_k_b\.weight=q8_0
blk\..*\.attn_v_b\.weight=q8_0
# Balance of attn tensors
blk\..*\.attn_kv_a_mqa\.weight=q8_0
blk\..*\.attn_q_a\.weight=q8_0
blk\..*\.attn_q_b\.weight=q8_0
blk\..*\.attn_output\.weight=iq6_k
## First Three Dense Layers [0-2] (GPU)
blk\.0\.ffn_down\.weight=q8_0
blk\.0\.ffn_(gate|up)\.weight=q8_0
blk\..*\.ffn_down\.weight=q8_0
blk\..*\.ffn_(gate|up)\.weight=iq6_k
## Shared Expert [3-60] (GPU)
blk\..*\.ffn_down_shexp\.weight=q8_0
blk\..*\.ffn_(gate|up)_shexp\.weight=iq6_k
## Routed Experts [3-60] (CPU)
blk\..*\.ffn_down_exps\.weight=iq4_kss
blk\..*\.ffn_(gate|up)_exps\.weight=iq3_k
## Token embedding and output tensors (GPU)
token_embd\.weight=iq4_k
output\.weight=iq6_k
"
custom=$(
echo "$custom" | grep -v '^#' | \
sed -Ez 's:\n+:,:g;s:,$::;s:^,::'
)
numactl -N ${SOCKET} -m ${SOCKET} \
./build/bin/llama-quantize \
--custom-q "$custom" \
--imatrix /mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/imatrix-DeepSeek-V3.2-Speciale-Q8_0.dat \
/mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/DeepSeek-V3.2-Speciale-256x20B-safetensors-BF16-00001-of-00030.gguf \
/mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/DeepSeek-V3.2-Speciale-IQ3_K.gguf \
IQ3_K \
128

smol-IQ1_KT 146.165 GiB (1.871 BPW)
#!/usr/bin/env bash
custom="
## Attention [0-60] (GPU)
blk\..*\.attn_k_b\.weight=q6_0
blk\..*\.attn_v_b\.weight=iq6_k
# Balance of attn tensors
blk\..*\.attn_kv_a_mqa\.weight=iq6_k
blk\..*\.attn_q_a\.weight=iq6_k
blk\..*\.attn_q_b\.weight=iq6_k
blk\..*\.attn_output\.weight=iq6_k
## First Three Dense Layers [0-2] (GPU)
blk\..*\.ffn_down\.weight=iq5_ks
blk\..*\.ffn_(gate|up)\.weight=iq4_kss
## Shared Expert [3-60] (GPU)
blk\..*\.ffn_down_shexp\.weight=iq5_ks
blk\..*\.ffn_(gate|up)_shexp\.weight=iq4_kss
## Routed Experts [3-60] (CPU)
blk\..*\.ffn_down_exps\.weight=iq1_kt
blk\..*\.ffn_(gate|up)_exps\.weight=iq1_kt
## Token embedding and output tensors (GPU)
token_embd\.weight=iq4_k
output\.weight=iq6_k
"
custom=$(
echo "$custom" | grep -v '^#' | \
sed -Ez 's:\n+:,:g;s:,$::;s:^,::'
)
numactl -N ${SOCKET} -m ${SOCKET} \
./build/bin/llama-quantize \
--custom-q "$custom" \
--imatrix /mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/imatrix-DeepSeek-V3.2-Speciale-Q8_0.dat \
/mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/DeepSeek-V3.2-Speciale-256x20B-safetensors-BF16-00001-of-00030.gguf \
/mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/DeepSeek-V3.2-Speciale-smol-IQ1_KT.gguf \
IQ1_KT \
128

Clone, build, and run
# Clone and checkout
$ git clone https://github.com/ikawrakow/ik_llama.cpp
$ cd ik_llama.cpp
# Build for hybrid CPU+CUDA
$ cmake -B build -DCMAKE_BUILD_TYPE=Release -DGGML_CUDA=ON
$ cmake --build build --config Release -j $(nproc)
# Run hybrid CPU + 2x CUDA GPUs (two older RTX A6000s, non-PRO, 48GB VRAM each)
## no -sm graph for DeepSeek yet. Is there an easy way to disable thinking, or does the response need to be prefilled?
## there might be a better way to do this with --n-cpu-moe 46 -ts 48,48 etc...
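## the -ot rules below pin the routed experts of the listed layers onto CUDA0/CUDA1,
## while --cpu-moe keeps the remaining routed experts on CPU
## assumed example path: point $model at whichever quant you actually downloaded
model=/mnt/data/models/ubergarm/DeepSeek-V3.2-Speciale-GGUF/DeepSeek-V3.2-Speciale-IQ3_K.gguf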
./build/bin/llama-server \
--model "$model" \
--alias ubergarm/DeepSeek-V3.2-Speciale-GGUF \
--ctx-size 32768 \
-ctk q8_0 \
-ger \
--merge-qkv \
-mla 3 -amb 1024 \
-ot "blk\.(3|4|5|6|7|8|9|10)\.ffn_(gate|up|down)_exps.*=CUDA0" \
-ot "blk\.(52|53|54|55|56|57|58|59|60)\.ffn_(gate|up|down)_exps.*=CUDA1" \
--cpu-moe \
-ub 4096 -b 4096 \
--threads 24 \
--host 127.0.0.1 \
--port 8080 \
--no-mmap \
--jinja
# CPU Only
numactl -N ${SOCKET} -m ${SOCKET} \
./build/bin/llama-server \
--model "$model"\
--alias ubergarm/DeepSeek-V3.2-Speciale-GGUF \
--merge-qkv \
--ctx-size 131072 \
-ctk q8_0 \
-mla 3 \
--parallel 1 \
--threads 96 \
--threads-batch 128 \
--numa numactl \
--host 127.0.0.1 \
--port 8080 \
--no-mmap \
--jinja
# --validate-quants
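Once llama-server is running, a quick sanity check is an OpenAI-style chat request against the local endpoint; this assumes the /v1/chat/completions route that llama-server exposes and the host/port used above:

curl -s http://127.0.0.1:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "ubergarm/DeepSeek-V3.2-Speciale-GGUF",
    "messages": [{"role": "user", "content": "Say hello in one short sentence."}],
    "max_tokens": 64
  }'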