Lucy-128k
224
107
1 language
license:apache-2.0
by
Menlo
Language Model
OTHER
1.7B params (paper: arXiv:2508.00360)
New
224 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
4-6GB+ RAM
Mobile
Laptop
Server
Quick Summary
Lucy: Edgerunning Agentic Web Search on Mobile with a 1.7B model.
Device Compatibility
Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
4GB+ RAM
Code Examples
Deployment (vLLM)

```bash
vllm serve Menlo/Lucy-128k \
  --host 0.0.0.0 \
  --port 1234 \
  --enable-auto-tool-choice \
  --tool-call-parser hermes \
  --rope-scaling '{"rope_type":"yarn","factor":3.2,"original_max_position_embeddings":40960}' \
  --max-model-len 131072
```
Deployment (llama.cpp)

```bash
llama-server ... --rope-scaling yarn --rope-scale 3.2 --yarn-orig-ctx 40960
```

Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free APIReplicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy NowDisclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.