Huihui-GLM-4.1V-9B-Thinking-abliterated

by huihui-ai

9B params · 2 languages · License: MIT · Image Model
65 downloads · New · Early-stage

Edge AI targets: Mobile, Laptop, Server (21GB+ RAM)
Quick Summary

This is an uncensored version of THUDM/GLM-4.1V-9B-Thinking, created with abliteration.
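
"Abliteration" here refers to directional ablation: an estimated "refusal direction" is removed from the model's weights so the model can no longer write along it. The toy sketch below illustrates the general idea only; it is an assumption-laden illustration, not the exact procedure used to produce this checkpoint, and the random W and r stand in for a real weight matrix and a direction estimated from activations.

import torch

# Toy illustration of directional ablation ("abliteration").
# Assumptions: r is a unit-norm "refusal direction" already estimated from
# activations; W stands in for a weight matrix writing to the residual stream.
d = 8
W = torch.randn(d, d)
r = torch.randn(d)
r = r / r.norm()

# Project out the component of W's output along r: W_abl = (I - r r^T) W.
W_abl = W - torch.outer(r, r) @ W

# The ablated matrix can no longer produce output along r:
print(torch.allclose(r @ W_abl, torch.zeros(d), atol=1e-5))  # True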

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 9GB+ RAM
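
These figures broadly track the size of the model weights at different precisions. As a rough sanity check (weights only; activations, the KV cache, and the vision encoder add overhead on top), the sketch below estimates memory for a 9B-parameter model:

# Rough weight-only memory estimate for a 9B-parameter model.
params = 9.0e9  # 9B parameters
for name, bytes_per_param in [("bf16/fp16", 2.0), ("8-bit", 1.0), ("4-bit", 0.5)]:
    print(f"{name}: ~{params * bytes_per_param / 1024**3:.1f} GiB of weights")
# bf16/fp16: ~16.8 GiB, 8-bit: ~8.4 GiB, 4-bit: ~4.2 GiB

This is roughly why the 4-bit configuration in the example below fits the 9GB+ recommendation, while unquantized bf16 lands in the 21GB+ class noted above.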

Code Examples

Usage (Python, transformers)
from transformers import AutoProcessor, Glm4vForConditionalGeneration, BitsAndBytesConfig
from PIL import Image
import torch

model_id = "huihui-ai/Huihui-GLM-4.1V-9B-Thinking-abliterated"

# 4-bit quantization (requires the bitsandbytes package); double quantization
# trims memory further, and fp32 CPU offload lets layers spill to system RAM.
quant_config_4 = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    llm_int8_enable_fp32_cpu_offload=True,
)

model = Glm4vForConditionalGeneration.from_pretrained(
    model_id, 
    device_map="auto", 
    quantization_config=quant_config_4,
    torch_dtype=torch.bfloat16
).eval()

processor = AutoProcessor.from_pretrained(model_id, use_fast=True)

# Sample image: https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png
# This path assumes the image has been saved into a local directory named after the model id.
image_path = model_id + "/Grayscale_8bits_palette_sample_image.png"

with Image.open(image_path) as image:
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": "Describe this image in detail."}
            ]
        }
    ]

    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    with torch.inference_mode():
        generated_ids = model.generate(**inputs, max_new_tokens=8192)

        # Decode only the newly generated tokens; skip_special_tokens=False keeps
        # special tokens (including the model's thinking markers) in the output.
        output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
        print(output_text)
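
The example reads the sample image from disk. As an alternative (a minimal sketch, assuming network access), the same image can be fetched directly from the Wikimedia URL in the comment:

import requests
from io import BytesIO
from PIL import Image

url = "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png"
resp = requests.get(url, timeout=30)
resp.raise_for_status()  # fail fast on HTTP errors
image = Image.open(BytesIO(resp.content))
# ...then build `messages` and run the processor/model exactly as above.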

Deploy This Model

Production-ready deployment in minutes.

Together.ai (Fastest API)
Instant API access to this model. Production-ready inference API; start free, scale to millions.

Replicate (Easiest Setup)
One-click model deployment. Run models in the cloud with a simple API; no DevOps required.

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.