GLM-4.7-GGUF

26
11
ik_llama.cpp
by
ubergarm
Language Model
OTHER
~357B params (160×21B MoE)
New
26 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
11GB+ RAM
Mobile
Laptop
Server
Quick Summary

GGUF quantizations of GLM-4.7, a mixture-of-experts language model, prepared by ubergarm for use with ik_llama.cpp.

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
5GB+ RAM

Code Examples

IQ5_K 250.635 GiB (6.008 BPW) — bash
#!/usr/bin/env bash
# Quantize the GLM-4.7 BF16 GGUF shards to IQ5_K, with per-tensor-class
# quant overrides supplied via llama-quantize's --custom-q option.
set -euo pipefail

# NUMA node to bind both CPUs and memory to. The original referenced
# ${SOCKET} without ever setting it, which would leave numactl's -N/-m
# options without arguments; default to node 0 (matching the CPU-only
# example elsewhere in this card) while still honoring an exported value.
SOCKET=${SOCKET:-0}

# One "tensor-regex=quant-type" rule per line; '#' lines are comments
# stripped below. Rules are matched against GGUF tensor names.
custom="
# 93 Repeating Layers [0-92]

# Attention
blk\..*\.attn_q.*=q8_0
blk\..*\.attn_k.*=q8_0
blk\..*\.attn_v.*=q8_0
blk\..*\.attn_output.*=q8_0

# First 3 Dense Layers [0-2]
blk\..*\.ffn_down\.weight=q8_0
blk\..*\.ffn_(gate|up)\.weight=q8_0

# Shared Expert Layers [3-92]
blk\..*\.ffn_down_shexp\.weight=q8_0
blk\..*\.ffn_(gate|up)_shexp\.weight=q8_0

# Routed Experts Layers [3-92]
blk\..*\.ffn_down_exps\.weight=iq6_k
blk\..*\.ffn_(gate|up)_exps\.weight=iq5_k

# NextN MTP Layer [92]
# Leave full q8_0 as supposedly better for MTP
# (doesn't use RAM or VRAM otherwise so its fine)
blk\..*\.nextn\.embed_tokens\.weight=q8_0
blk\..*\.nextn\.shared_head_head\.weight=q8_0
blk\..*\.nextn\.eh_proj\.weight=q8_0

# Non-Repeating Layers
token_embd\.weight=iq6_k
output\.weight=iq6_k
"

# Strip comment lines and join the remaining rules into the single
# comma-separated string --custom-q expects. GNU sed's -z reads the whole
# stream as one record so the embedded newlines can be rewritten in one pass.
custom=$(
  echo "$custom" | grep -v '^#' | \
  sed -Ez 's:\n+:,:g;s:,$::;s:^,::'
)

# Trailing 128 = worker-thread count for llama-quantize.
numactl -N "${SOCKET}" -m "${SOCKET}" \
./build/bin/llama-quantize \
    --custom-q "$custom" \
    --imatrix /mnt/data/models/ubergarm/GLM-4.7-GGUF/imatrix-GLM-4.7-BF16.dat \
    /mnt/data/models/ubergarm/GLM-4.7-GGUF/GLM-160x21B-4.7-BF16-00001-of-00015.gguf \
    /mnt/data/models/ubergarm/GLM-4.7-GGUF/GLM-4.7-IQ5_K.gguf \
    IQ5_K \
    128
IQ2_KL 129.279 GiB (3.099 BPW) — bash
#!/usr/bin/env bash
# Quantize the GLM-4.7 BF16 GGUF shards to IQ2_KL, with per-tensor-class
# quant overrides supplied via llama-quantize's --custom-q option.
set -euo pipefail

# NUMA node to bind both CPUs and memory to. The original referenced
# ${SOCKET} without ever setting it, which would leave numactl's -N/-m
# options without arguments; default to node 0 while still honoring an
# exported value.
SOCKET=${SOCKET:-0}

# One "tensor-regex=quant-type" rule per line; '#' lines are comments
# stripped below. First matching rule wins, so the layer-specific
# blk\.(0|1|2)\. attention rules precede the catch-all blk\..* rules.
custom="
# 93 Repeating Layers [0-92]

# Attention
blk\.(0|1|2)\.attn_q.*=iq6_k
blk\.(0|1|2)\.attn_k.*=q8_0
blk\.(0|1|2)\.attn_v.*=q8_0
blk\.(0|1|2)\.attn_output.*=iq6_k

blk\..*\.attn_q.*=iq5_k
blk\..*\.attn_k.*=iq6_k
blk\..*\.attn_v.*=iq6_k
blk\..*\.attn_output.*=iq5_k

# First 3 Dense Layers [0-2]
blk\..*\.ffn_down\.weight=iq6_k
blk\..*\.ffn_(gate|up)\.weight=iq5_k

# Shared Expert Layers [3-92]
blk\..*\.ffn_down_shexp\.weight=iq6_k
blk\..*\.ffn_(gate|up)_shexp\.weight=iq5_k

# Routed Experts Layers [3-92]
blk\..*\.ffn_down_exps\.weight=iq3_k
blk\..*\.ffn_(gate|up)_exps\.weight=iq2_kl

# NextN MTP Layer [92]
blk\..*\.nextn\.embed_tokens\.weight=q8_0
blk\..*\.nextn\.shared_head_head\.weight=q8_0
blk\..*\.nextn\.eh_proj\.weight=q8_0

# Non-Repeating Layers
token_embd\.weight=iq4_k
output\.weight=iq6_k
"

# Strip comment lines and join the remaining rules into the single
# comma-separated string --custom-q expects. GNU sed's -z reads the whole
# stream as one record so the embedded newlines can be rewritten in one pass.
custom=$(
  echo "$custom" | grep -v '^#' | \
  sed -Ez 's:\n+:,:g;s:,$::;s:^,::'
)

# Trailing 128 = worker-thread count for llama-quantize.
numactl -N "${SOCKET}" -m "${SOCKET}" \
./build/bin/llama-quantize \
    --custom-q "$custom" \
    --imatrix /mnt/data/models/ubergarm/GLM-4.7-GGUF/imatrix-GLM-4.7-BF16.dat \
    /mnt/data/models/ubergarm/GLM-4.7-GGUF/GLM-160x21B-4.7-BF16-00001-of-00015.gguf \
    /mnt/data/models/ubergarm/GLM-4.7-GGUF/GLM-4.7-v14-IQ2_KL.gguf \
    IQ2_KL \
    128
smol-IQ1_KT 82.442 GiB (1.976 BPW) — bash
#!/usr/bin/env bash
# Quantize the GLM-4.7 BF16 GGUF shards to the "smol" IQ1_KT mix, with
# per-tensor-class quant overrides supplied via llama-quantize's --custom-q.
set -euo pipefail

# NUMA node to bind both CPUs and memory to. The original referenced
# ${SOCKET} without ever setting it, which would leave numactl's -N/-m
# options without arguments; default to node 0 while still honoring an
# exported value.
SOCKET=${SOCKET:-0}

# One "tensor-regex=quant-type" rule per line; '#' lines are comments
# stripped below. First matching rule wins, so the layer-specific
# blk\.(0|1|2)\. attention rules precede the catch-all blk\..* rules.
custom="
# 93 Repeating Layers [0-92]

# Attention
blk\.(0|1|2)\.attn_q.*=q8_0
blk\.(0|1|2)\.attn_k.*=q8_0
blk\.(0|1|2)\.attn_v.*=q8_0
blk\.(0|1|2)\.attn_output.*=q8_0

blk\..*\.attn_q.*=iq5_ks
blk\..*\.attn_k.*=q8_0
blk\..*\.attn_v.*=q8_0
blk\..*\.attn_output.*=iq5_ks

# First 3 Dense Layers [0-2]
blk\..*\.ffn_down\.weight=iq5_ks
blk\..*\.ffn_(gate|up)\.weight=iq5_ks

# Shared Expert Layers [3-92]
blk\..*\.ffn_down_shexp\.weight=iq5_ks
blk\..*\.ffn_(gate|up)_shexp\.weight=iq5_ks

# Routed Experts Layers [3-92]
blk\..*\.ffn_down_exps\.weight=iq1_kt
blk\..*\.ffn_(gate|up)_exps\.weight=iq1_kt

# NextN MTP Layer [92]
blk\..*\.nextn\.embed_tokens\.weight=q8_0
blk\..*\.nextn\.shared_head_head\.weight=q8_0
blk\..*\.nextn\.eh_proj\.weight=q8_0

# Non-Repeating Layers
token_embd\.weight=iq4_k
output\.weight=iq6_k
"

# Strip comment lines and join the remaining rules into the single
# comma-separated string --custom-q expects. GNU sed's -z reads the whole
# stream as one record so the embedded newlines can be rewritten in one pass.
custom=$(
  echo "$custom" | grep -v '^#' | \
  sed -Ez 's:\n+:,:g;s:,$::;s:^,::'
)

# Trailing 128 = worker-thread count for llama-quantize.
numactl -N "${SOCKET}" -m "${SOCKET}" \
./build/bin/llama-quantize \
    --custom-q "$custom" \
    --imatrix /mnt/data/models/ubergarm/GLM-4.7-GGUF/imatrix-GLM-4.7-BF16.dat \
    /mnt/data/models/ubergarm/GLM-4.7-GGUF/GLM-160x21B-4.7-BF16-00001-of-00015.gguf \
    /mnt/data/models/ubergarm/GLM-4.7-GGUF/GLM-4.7-smol-IQ1_KT.gguf \
    IQ1_KT \
    128
Quick Start — bash (llama.cpp)
# NOTE(review): this is a usage transcript, not a single runnable script.
# Lines prefixed with "$ " are interactive-shell examples, and the three
# serving invocations below are mutually exclusive alternatives — pick one.
# Clone and checkout
$ git clone https://github.com/ikawrakow/ik_llama.cpp
$ cd ik_llama.cpp

# Build for hybrid CPU+CUDA
$ cmake -B build -DCMAKE_BUILD_TYPE=Release -DGGML_CUDA=ON
$ cmake --build build --config Release -j $(nproc)

# Hybrid CPU + 1 GPU
# Assumes $model is set to the path of a downloaded .gguf — set it first.
# Flags such as -ger, --merge-qkv and --n-cpu-moe are ik_llama.cpp-specific;
# presumably --n-cpu-moe keeps that many MoE layers on the CPU — check the
# project docs for your hardware.
./build/bin/llama-sweep-bench \
    --model "$model" \
    --alias ubergarm/GLM-4.7 \
    --ctx-size 65536 \
    -ger \
    --merge-qkv \
    -ngl 99 \
    --n-cpu-moe 72 \
    -ub 4096 -b 4096 \
    --threads 24 \
    --parallel 1 \
    --host 127.0.0.1 \
    --port 8080 \
    --no-mmap \
    --jinja

# Hybrid CPU + 2 or more GPUs
# using new "-sm graph" 'tensor parallel' feature!
# https://github.com/ikawrakow/ik_llama.cpp/pull/1080
# -ts 41,48 splits tensors across the two GPUs in that ratio; adjust to
# match your cards' VRAM.
./build/bin/llama-sweep-bench \
    --model "$model" \
    --alias ubergarm/GLM-4.7 \
    --ctx-size 65536 \
    -ger \
    -sm graph \
    -smgs \
    -mea 256 \
    -ngl 99 \
    --n-cpu-moe 72 \
    -ts 41,48 \
    -ub 4096 -b 4096 \
    --threads 24 \
    --parallel 1 \
    --host 127.0.0.1 \
    --port 8080 \
    --no-mmap \
    --jinja
# --max-gpu=3 # 3 or 4 usually if >2 GPUs available

# CPU Only
# Binds CPU and memory to NUMA node $SOCKET (here node 0) via numactl, and
# quantizes the KV cache to q8_0 (-ctk/-ctv) to cut RAM usage.
SOCKET=0 numactl -N ${SOCKET} -m ${SOCKET} \
./build/bin/llama-server \
    --model "$model"\
    --alias ubergarm/GLM-4.7 \
    --ctx-size 65536 \
    -ger \
    --merge-qkv \
    -ctk q8_0 -ctv q8_0 \
    -ub 4096 -b 4096 \
    --parallel 1 \
    --threads 96 \
    --threads-batch 128 \
    --numa numactl \
    --host 127.0.0.1 \
    --port 8080 \
    --no-mmap \
    --jinja

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.