QuerySense-Preview
11
1 language
license:apache-2.0
by
jaeyong2
Language Model
OTHER
New
11 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary
License: Apache-2.0 (inherited from the Qwen/Qwen3 base model — exact base variant truncated in source; verify on the model page)
Code Examples
Example (Python, using the `transformers` library):
# Example: query preprocessing with jaeyong2/QuerySense-Preview via Hugging
# Face `transformers`. The model receives a Question (and optional Context)
# and either keeps the question as-is, rephrases it with the necessary
# information, or fills in missing information automatically.
#
# NOTE(review): the original page repeated this identical example ~16 times
# with page-scrape artifacts fused into the code; this is the single,
# cleaned-up copy.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "jaeyong2/QuerySense-Preview"

# Load the causal-LM weights and the matching tokenizer from the Hub.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# System prompt describing the preprocessing task (kept verbatim — the model
# was presumably tuned against this exact wording; do not edit casually).
prompt = """
# Role
You are an AI that receives Questions and Context from users as input and preprocesses the Questions.
# Instruction
- If the user's Questions contains enough information to create an answer, use the user's Questions as is.
- If the information is insufficient or the Context is insufficient, please rephrase the Questions with the necessary information.
- If there is insufficient information to generate an answer and there is no Context, it will automatically fill in the appropriate information.
# input
- Context : Previous conversations or related Context or related information entered by the user (Optional)
- Question : User's Questions (Required)
""".strip()

# User turn: empty Context plus the raw question to preprocess.
content = """
Context :
Question : name
""".strip()

messages = [
    {"role": "system", "content": prompt},
    {"role": "user", "content": content},
]

# Render the chat template to a plain string; tokenization happens below in
# a single pass so we also get the attention mask.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,  # Switches between thinking and non-thinking modes. Default is True.
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

# Conduct text completion. 32768 is an upper bound; generation stops earlier
# at the model's EOS token.
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=32768,
)

# Slice off the prompt tokens so only the newly generated tokens are decoded.
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
content = tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")
print("content:", content)

Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API

Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.