Devstral-Vision-Small-2507-gguf
456
10
Q4
license:apache-2.0
by
QuixiAI
Other
OTHER
New
456 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary
Quantized GGUF build (Q4_K_M) of Devstral Vision Small 2507, a vision-capable coding model packaged for local inference with llama.cpp.
Code Examples
## Usage Examples (bash — llama.cpp)

# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .

# Download the multimodal projector
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .

# Run with llama.cpp
# NOTE(review): the mmproj-BF16.gguf downloaded above is normally passed to
# llama.cpp via --mmproj for image input; the command as published omits it —
# verify against current llama.cpp multimodal usage.
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192

(The original page repeated the identical example above approximately 38 times due to a scraping artifact; the duplicates have been collapsed into the single copy shown.)

## Usage Examples (bash — llama.cpp)
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192Usage Examplesbashllama.cpp
# Download the model
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
Devstral-Small-Vision-2507-Q4_K_M.gguf \
--local-dir .
huggingface-cli download cognitivecomputations/Devstral-Vision-Small-2507-GGUF \
mmproj-BF16.gguf \
--local-dir .
# Run with llama.cpp
./llama-cli -m Devstral-Small-Vision-2507-Q4_K_M.gguf \
-p "Analyze this UI and generate React code" \
--image screenshot.png \
-c 8192With LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionWith LM Studiobash
# Create Modelfile
cat > Modelfile << EOF
FROM ./Devstral-Small-Vision-2507-Q4_K_M.gguf
PARAMETER temperature 0.7
PARAMETER num_ctx 8192
EOF
# Create and run
ollama create devstral-vision -f Modelfile
ollama run devstral-visionCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalCreate and runbash
python koboldcpp.py --model Devstral-Small-Vision-2507-Q4_K_M.gguf \
--contextsize 8192 \
--gpulayers 999 \
--multimodalDeploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free APIReplicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy NowDisclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.