ctxl-rerank-v2-instruct-multilingual-6b-nvfp4

26
2
license:cc-by-nc-sa-4.0
by
ContextualAI
Language Model
OTHER
6B params
New
26 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
14GB+ RAM
Mobile
Laptop
Server
Quick Summary

Multilingual instruction-following reranker model (6B parameters, NVFP4-quantized).

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
6GB+ RAM

Code Examples

Quickstart (Python, vLLM)
# Requires vLLM==0.10.0 for NVFP4 support
# See full implementation below

model_path = "ContextualAI/ctxl-rerank-v2-instruct-multilingual-6b-nvfp4"

query = "What are the health benefits of exercise?"
instruction = "Prioritize recent medical research"
documents = [
    "Regular exercise reduces risk of heart disease and improves mental health.",
    "A 2024 study shows exercise enhances cognitive function in older adults.",
    "Ancient Greeks valued physical fitness for military training."
]

infer_w_vllm(model_path, query, instruction, documents)
Example output:
Query: What are the health benefits of exercise?
Instruction: Prioritize recent medical research
Score: -2.2969 | Doc: A 2024 study shows exercise enhances cognitive function in older adults.
Score: -4.6875 | Doc: Regular exercise reduces risk of heart disease and improves mental health.
Score: -12.3750 | Doc: Ancient Greeks valued physical fitness for military training.
vLLM Usage (Python)
import os
os.environ['VLLM_USE_V1'] = '0'  # v1 engine doesn't support logits processor yet

import torch
from vllm import LLM, SamplingParams


def logits_processor(_, scores):
    """Custom logits processor for vLLM reranking."""
    index = scores[0].view(torch.uint16)
    scores = torch.full_like(scores, float("-inf"))
    scores[index] = 1
    return scores


def format_prompts(query: str, instruction: str, documents: list[str]) -> list[str]:
    """Build one reranking prompt per document for the given query.

    A non-empty instruction is appended to the query with a leading space;
    a falsy instruction is interpolated as-is (matching the original
    formatting behavior).
    """
    suffix = f" {instruction}" if instruction else instruction
    return [
        "Check whether a given document contains information helpful to answer the query."
        f"\n<Document> {doc}\n<Query> {query}{suffix} ??"
        for doc in documents
    ]


def infer_w_vllm(model_path: str, query: str, instruction: str, documents: list[str]):
    """Rerank `documents` against `query` with a vLLM engine and print scores.

    Loads the model at `model_path`, generates exactly one token per
    document prompt, reinterprets that token id's bits as a bfloat16
    relevance score, and prints the documents sorted by score (highest
    first). Returns None; output goes to stdout.
    """
    engine = LLM(
        model=model_path,
        gpu_memory_utilization=0.85,
        max_model_len=8192,
        dtype="bfloat16",
        max_logprobs=2,
        max_num_batched_tokens=262144,
    )
    params = SamplingParams(
        temperature=0,
        max_tokens=1,
        logits_processors=[logits_processor],
    )

    outputs = engine.generate(
        format_prompts(query, instruction, documents), params, use_tqdm=False
    )

    # Decode each generated token id back into the bfloat16 score whose
    # bits it carries (inverse of the trick in logits_processor).
    scored = []
    for doc_idx, out in enumerate(outputs):
        raw_bits = torch.tensor([out.outputs[0].token_ids[0]], dtype=torch.uint16)
        scored.append((raw_bits.view(torch.bfloat16).item(), doc_idx, documents[doc_idx]))

    # Highest relevance first.
    scored.sort(key=lambda item: item[0], reverse=True)

    print(f"Query: {query}")
    print(f"Instruction: {instruction}")
    for score, _, doc in scored:
        print(f"Score: {score:.4f} | Doc: {doc}")


# Example usage
if __name__ == "__main__":
    demo_query = "What are the health benefits of exercise?"
    demo_instruction = "Prioritize recent medical research"
    demo_documents = [
        "Regular exercise reduces risk of heart disease and improves mental health.",
        "A 2024 study shows exercise enhances cognitive function in older adults.",
        "Ancient Greeks valued physical fitness for military training.",
    ]
    infer_w_vllm(
        "ContextualAI/ctxl-rerank-v2-instruct-multilingual-6b-nvfp4",
        demo_query,
        demo_instruction,
        demo_documents,
    )
Citation (BibTeX)
@misc{ctxl_rerank_v2_instruct_multilingual,
      title={Contextual AI Reranker v2}, 
      author={Halal, George and Agrawal, Sheshansh},
      year={2025},
      url={https://contextual.ai/blog/rerank-v2}, 
}

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.