QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3

32B params · 4-bit GPTQ · by ModelCloud · License: Other
Quick Summary

A 4-bit GPTQ quantization of QwQ-32B-Preview, exported to Apple's MLX format for on-device inference on Apple Silicon.

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 30GB+ RAM
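
As a rough sanity check on the recommendation above: this repo ships 4-bit weights for a 32B-parameter model, so the weights alone occupy on the order of 16-18GB before KV cache and runtime overhead. A back-of-envelope estimate; the metadata and overhead factors below are assumptions, not measured values:

```python
# Rough memory estimate for a 4-bit quantized 32B-parameter model.
params = 32e9          # parameter count
bits_per_weight = 4.5  # ~4-bit weights plus quantization metadata (assumed)
weights_gb = params * bits_per_weight / 8 / 1e9
overhead = 1.3         # KV cache + runtime overhead factor (assumed)
print(f"~{weights_gb:.0f}GB weights, ~{weights_gb * overhead:.0f}GB total")
# ~18GB weights, ~23GB total -- consistent with the 30GB+ recommendation
```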

Code Examples

How to run this model

```bash
# install mlx-lm
pip install mlx_lm
```
```python
from mlx_lm import load, generate

# load the MLX-format model and tokenizer from the Hugging Face Hub
mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

# wrap the prompt in the model's chat template
messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
```
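
To cap the response length, `generate` also accepts a `max_tokens` argument. A minimal self-contained sketch; the prompt text and the 512-token limit are arbitrary choices for illustration, not recommendations from the model authors:

```python
from mlx_lm import load, generate

model, tokenizer = load("ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3")

# arbitrary example prompt (assumption, not from the model card)
messages = [{"role": "user", "content": "Explain GPTQ quantization in one paragraph."}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

# max_tokens caps the number of generated tokens; 512 is an arbitrary choice
text = generate(model, tokenizer, prompt=prompt, max_tokens=512, verbose=True)
```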
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
How to run this modelpython
from mlx_lm import load, generate

mlx_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"
mlx_model, tokenizer = load(mlx_path)
prompt = "The capital of France is"

messages = [{"role": "user", "content": prompt}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True
)

text = generate(mlx_model, tokenizer, prompt=prompt, verbose=True)
Export gptq to mlx

```bash
# install gptqmodel with mlx support
pip install gptqmodel[mlx] --no-build-isolation
```
```python
from gptqmodel import GPTQModel

# path to the GPTQ-quantized source model
gptq_model_path = "ModelCloud/QwQ-32B-Preview-gptqmodel-4bit-vortex-v3"
# local destination for the exported MLX model
mlx_path = "./vortex/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3"

# export the GPTQ checkpoint to MLX format
GPTQModel.export(gptq_model_path, mlx_path, "mlx")
```
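
Once the export finishes, you can smoke-test the result by loading it from the local output directory with mlx_lm. A minimal sketch, assuming the export above succeeded; the prompt is an arbitrary example:

```python
from mlx_lm import load, generate

# load the freshly exported model from the local output directory
model, tokenizer = load("./vortex/QwQ-32B-Preview-gptqmodel-4bit-vortex-mlx-v3")

messages = [{"role": "user", "content": "Say hello."}]  # arbitrary smoke-test prompt
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
print(generate(model, tokenizer, prompt=prompt, verbose=False))
```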
