rank1-mistral-2501-24b-awq
by jhu-clsp · Language Model · 24B params · 2 languages · license: MIT
Quick Summary
rank1-mistral-2501-24b-awq is an AWQ-quantized 24B reasoning reranker from JHU CLSP, built on Mistral Small 2501. Given a query and a passage, it first generates a reasoning chain inside <think> tags and then answers "true" or "false"; the probability assigned to "true" serves as a relevance score for reranking.
Device Compatibility
Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 23GB+ RAM
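As a rough sanity check on these figures (an estimate, not from the model card): a 24B-parameter model with 4-bit AWQ weights needs about 24B × 0.5 bytes ≈ 12 GB for the weights alone, and the 23GB+ recommendation leaves headroom for the KV cache at the 16,000-token context used below, plus activations and runtime overhead.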
Code Examples
Initialize the model with vLLM (Python)
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer
import math

# Initialize the model with vLLM
model = LLM(
    model="jhu-clsp/rank1-mistral-2501-24b-awq",
    tensor_parallel_size=1,       # Number of GPUs
    trust_remote_code=True,
    max_model_len=16000,          # Context length
    gpu_memory_utilization=0.9,
    dtype="auto",                 # Will use the appropriate quantized dtype
)

# Set up sampling parameters: greedy decoding, stopping once the model
# emits its final true/false judgment after the reasoning chain
sampling_params = SamplingParams(
    temperature=0,
    max_tokens=8192,
    logprobs=20,
    stop=["</think> true", "</think> false"],
    skip_special_tokens=False,
)

# Prepare the prompt; the trailing "<think>" tag starts the reasoning chain
def create_prompt(query, document):
    return (
        "Determine if the following passage is relevant to the query. "
        "Answer only with 'true' or 'false'.\n"
        f"Query: {query}\n"
        f"Passage: {document}\n"
        "<think>"
    )

# Example usage
query = "What are the effects of climate change?"
document = (
    "Climate change leads to rising sea levels, extreme weather events, "
    "and disruptions to ecosystems. These effects are caused by increasing "
    "greenhouse gas concentrations in the atmosphere due to human activities."
)

# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)

# Extract the reasoning chain and the logprobs at the final generated position
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]

# Get token IDs for the " true" and " false" answer tokens
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]

# Calculate relevance score: probability of "true" renormalized over {true, false}
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)

print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
from vllm import LLM, SamplingParams
import math
# Initialize the model with vLLM
model = LLM(
model="jhu-clsp/rank1-mistral-2501-24b-awq",
tensor_parallel_size=1, # Number of GPUs
trust_remote_code=True,
max_model_len=16000, # Context length
gpu_memory_utilization=0.9,
dtype="auto", # Will use the appropriate quantized dtype
)
# Set up sampling parameters
sampling_params = SamplingParams(
temperature=0,
max_tokens=8192,
logprobs=20,
stop=["</think> true", "</think> false"],
skip_special_tokens=False
)
# Prepare the prompt
def create_prompt(query, document):
return (
"Determine if the following passage is relevant to the query. "
"Answer only with 'true' or 'false'.\n"
f"Query: {query}\n"
f"Passage: {document}\n"
"<think>"
)
# Example usage
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
# Generate prediction
prompt = create_prompt(query, document)
outputs = model.generate([prompt], sampling_params)
# Extract score
output = outputs[0].outputs[0]
text = output.text
final_logits = output.logprobs[-1]
# Get token IDs for "true" and "false" tokens
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
true_token = tokenizer(" true", add_special_tokens=False).input_ids[0]
false_token = tokenizer(" false", add_special_tokens=False).input_ids[0]
# Calculate relevance score (probability of "true")
true_logit = final_logits[true_token].logprob
false_logit = final_logits[false_token].logprob
true_score = math.exp(true_logit)
false_score = math.exp(false_logit)
relevance_score = true_score / (true_score + false_score)
print(f"Reasoning chain: {text}")
print(f"Relevance score: {relevance_score}")Initialize the model with vLLMpythontransformers
Load the tokenizer and quantized model (python / transformers)
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
    "jhu-clsp/rank1-mistral-2501-24b-awq",
    device_map="auto",
    trust_remote_code=True
)

# Prepare the prompt
query = "What are the effects of climate change?"
document = (
    "Climate change leads to rising sea levels, extreme weather events, and "
    "disruptions to ecosystems. These effects are caused by increasing "
    "greenhouse gas concentrations in the atmosphere due to human activities."
)
prompt = (
    "Determine if the following passage is relevant to the query. "
    "Answer only with 'true' or 'false'.\n"
    f"Query: {query}\nPassage: {document}\n<think>"
)

# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=512,
        do_sample=False,  # greedy decoding; temperature is ignored unless sampling is enabled
        return_dict_in_generate=True,
        output_scores=True,
        pad_token_id=tokenizer.eos_token_id
    )

# Process the output, guarding against a reasoning chain that never closes </think>
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain, closed, answer = generated_text.split("<think>", 1)[1].partition("</think>")
reasoning_chain = reasoning_chain.strip()
relevance_judgment = "true" if closed and "true" in answer.strip().lower() else "false"

print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the tokenizer and quantized model
tokenizer = AutoTokenizer.from_pretrained("jhu-clsp/rank1-mistral-2501-24b-awq")
model = AutoModelForCausalLM.from_pretrained(
"jhu-clsp/rank1-mistral-2501-24b-awq",
device_map="auto",
trust_remote_code=True
)
# Prepare the prompt
query = "What are the effects of climate change?"
document = "Climate change leads to rising sea levels, extreme weather events, and disruptions to ecosystems. These effects are caused by increasing greenhouse gas concentrations in the atmosphere due to human activities."
prompt = f"Determine if the following passage is relevant to the query. Answer only with 'true' or 'false'.\nQuery: {query}\nPassage: {document}\n<think>"
# Generate the reasoning chain and relevance judgment
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=512,
temperature=0.0,
return_dict_in_generate=True,
output_scores=True,
pad_token_id=tokenizer.eos_token_id
)
# Process the output
generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
reasoning_chain = generated_text.split("<think>")[1].split("</think>")[0].strip()
relevance_judgment = "true" if "true" in generated_text.split("</think>")[1].strip().lower() else "false"
print(f"Reasoning chain: {reasoning_chain}")
print(f"Relevance judgment: {relevance_judgment}")Load the tokenizer and quantized modelpythontransformers
MTEB Integration
from mteb import MTEB
from rank1 import rank1  # From the official repo

# Initialize the model
model = rank1(
    model_name_or_path="jhu-clsp/rank1-mistral-2501-24b-awq",
    num_gpus=1,
    device="cuda",
    quantized=True  # Indicate that you're using the quantized version
)

# Run evaluation on specific tasks
evaluation = MTEB(tasks=["NevIR"])
results = evaluation.run(model)
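A hedged usage note: assuming the mteb package's standard interface, run also accepts an output_folder, so per-task scores are written to disk for later comparison against the full-precision rank1 checkpoints.

# Optional: persist per-task scores to disk (standard mteb option)
results = evaluation.run(model, output_folder="results/rank1-mistral-2501-24b-awq")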
Citation

@misc{weller2025rank1testtimecomputereranking,
  title={Rank1: Test-Time Compute for Reranking in Information Retrieval},
  author={Orion Weller and Kathryn Ricci and Eugene Yang and Andrew Yates and Dawn Lawrie and Benjamin Van Durme},
  year={2025},
  eprint={2502.18418},
  archivePrefix={arXiv},
  primaryClass={cs.IR},
  url={https://arxiv.org/abs/2502.18418},
}