# GLM-5.1-GGUF

ik_llama.cpp GGUF quantizations of GLM-5.1 by ubergarm.

## Quick Summary

GGUF quantizations of GLM-5.1, a 256x22B mixture-of-experts model (per the source shard names), produced with ik_llama.cpp's llama-quantize and an importance matrix. The smol-IQ4_K recipe below keeps attention and shared-expert tensors at higher precision while quantizing the routed experts to roughly 4 bits; a smaller smol-IQ2_KS quant is also published and is used in the Quick Start example.

## Code Examples

### Quant recipe (smol-IQ4_K)

```bash
#!/usr/bin/env bash

custom="
# 79 Repeating Layers [0-78]

## Attention [0-78]
blk\..*\.attn_k_b\.weight=q8_0
blk\..*\.attn_v_b\.weight=q8_0
blk\..*\.attn_kv_a_mqa\.weight=q8_0
blk\..*\.attn_q_a\.weight=q8_0
blk\..*\.attn_q_b\.weight=q8_0
blk\..*\.attn_output\.weight=q8_0

# First 3 Dense Layers [0-2]
blk\..*\.ffn_down\.weight=iq6_k
blk\..*\.ffn_(gate|up)\.weight=iq6_k

# Shared Expert Layers [3-78]
blk\..*\.ffn_down_shexp\.weight=iq6_k
blk\..*\.ffn_(gate|up)_shexp\.weight=iq6_k

# Routed Experts Layers [3-78]
# NOTE: blk.78.* NOT implemented at time of quantizing so no imatrix data available
blk\.(78)\.ffn_down_exps\.weight=iq6_k
blk\.(78)\.ffn_(gate|up)_exps\.weight=iq6_k
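# NOTE: these blk.78 overrides are listed before the catch-all blk..* rules
# below; order is assumed to matter here, with the first matching pattern winning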
blk\..*\.ffn_down_exps\.weight=iq4_k
blk\..*\.ffn_(gate|up)_exps\.weight=iq4_k

# Lightning indexer tensors [0-78]
# NOTE: indexer.* NOT implemented at time of quantizing so no imatrix data available
blk\..*\.indexer\.proj\.weight=q8_0
blk\..*\.indexer\.attn_k\.weight=q8_0
blk\..*\.indexer\.attn_q_b\.weight=q8_0

# NextN MTP Layer [78]
# NOTE: nextn.* NOT implemented at time of quantizing so no imatrix data available
blk\..*\.nextn\.eh_proj\.weight=q8_0

# Non-Repeating Layers
token_embd\.weight=iq6_k
output\.weight=iq6_k
"

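# Strip the comment lines and collapse the remaining rules into the single
# comma-separated list that --custom-q expects.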
custom=$(
  echo "$custom" | grep -v '^#' | \
  sed -Ez 's:\n+:,:g;s:,$::;s:^,::'
)

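# Pin the quantize run to one NUMA node; SOCKET holds the node id (e.g. 0).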
numactl -N ${SOCKET} -m ${SOCKET} \
./build/bin/llama-quantize \
    --custom-q "$custom" \
    --imatrix /mnt/data/models/ubergarm/GLM-5.1-GGUF/imatrix-GLM-5.1-BF16.dat \
    /mnt/data/models/ubergarm/GLM-5.1-GGUF/GLM-256x22B-5.1-BF16-00001-of-00033.gguf \
    /mnt/data/models/ubergarm/GLM-5.1-GGUF/GLM-5.1-smol-IQ4_K.gguf \
    IQ4_K \
    128
```
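
After quantizing, a quick sanity check is to load the file briefly and read the loader's per-type tensor summary; ik_llama.cpp inherits llama.cpp's `llama_model_loader: - type ...` log lines, so the counts should reflect the recipe above. A minimal sketch (output path reused from the quantize command; the exact log-line format is an assumption):

```bash
# Load the quantized model briefly and show the per-type tensor counts
# (log line format assumed from llama.cpp's model loader).
./build/bin/llama-cli \
    --model /mnt/data/models/ubergarm/GLM-5.1-GGUF/GLM-5.1-smol-IQ4_K.gguf \
    -p "hi" -n 4 2>&1 | grep -- '- type '
```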

### Quick Start

```bash
# Clone and checkout
git clone https://github.com/ikawrakow/ik_llama.cpp
cd ik_llama.cpp

# Build for hybrid CPU+CUDA
cmake -B build -DCMAKE_BUILD_TYPE=Release -DGGML_CUDA=ON
cmake --build build --config Release -j $(nproc)
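# For a CPU-only host, drop -DGGML_CUDA=ON from the configure step above.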

# Download Quants
pip install huggingface_hub
hf download --local-dir ./GLM-5.1-GGUF/ --include="smol-IQ2_KS/*.gguf" ubergarm/GLM-5.1-GGUF
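
# Point $model at the first shard of the downloaded quant (exact filename
# assumed; adjust to what lands on disk -- later shards are found automatically).
model=$(ls ./GLM-5.1-GGUF/smol-IQ2_KS/*.gguf | head -n 1)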

# Hybrid CPU and Single GPU
# *NOTE* -fit might work on ik_llama.cpp now, so give it a try
./build/bin/llama-server \
    --model "$model" \
    --alias ubergarm/GLM-5.1 \
    -muge \
    --merge-qkv \
    --ctx-size 131072 \
    -ctk f16 \
    -mla 3 \
    -amb 512 \
    -ngl 999 \
    --n-cpu-moe 50 \
    --parallel 1 \
    --threads 96 \
    --threads-batch 128 \
    --host 127.0.0.1 \
    --port 8080 \
    --no-mmap \
    -cram 8192 \
    --jinja

# CPU-Only
numactl -N ${SOCKET} -m ${SOCKET} \
./build/bin/llama-server \
    --model "$model" \
    --alias ubergarm/GLM-5.1 \
    -muge \
    --merge-qkv \
    --ctx-size 131072 \
    -ctk q8_0 \
    -mla 3 \
    --parallel 1 \
    --threads 96 \
    --threads-batch 128 \
    --numa numactl \
    --host 127.0.0.1 \
    --port 8080 \
    --no-mmap \
    -cram 8192 \
    --jinja
```

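With llama-server running, it exposes an OpenAI-compatible HTTP API on the host and port configured above. A minimal smoke test against the chat endpoint (alias and port taken from the server commands; adjust if you changed them):

```bash
curl -s http://127.0.0.1:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "ubergarm/GLM-5.1",
        "messages": [{"role": "user", "content": "Say hello in one short sentence."}],
        "max_tokens": 64
      }'
```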