SweRankEmbed-Small
by Salesforce · Code Model · license: CC-BY-NC-4.0 · ~1.1K downloads
Quick Summary
SweRankEmbed-Small is a code-retrieval embedding model from Salesforce. It encodes natural-language queries and code snippets into a shared vector space so that relevant code can be ranked by embedding similarity. It loads directly through Sentence-Transformers (with trust_remote_code=True), encoding queries with a dedicated "query" prompt and documents as-is.
Code Examples
Usage with Sentence-Transformers

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)

queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']

# Queries are encoded with the "query" prompt; documents are encoded as-is
# (asymmetric retrieval).
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)

# Pairwise similarity scores via dot product of the embedding matrices.
scores = query_embeddings @ document_embeddings.T

for query, query_scores in zip(queries, scores):
    # Sort the documents for this query from highest to lowest score.
    doc_score_pairs = sorted(zip(documents, query_scores), key=lambda x: x[1], reverse=True)
    # Output passages & scores
    print("Query:", query)
    for document, score in doc_score_pairs:
        print(score, document)
```
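For a slightly larger picture, here is a hedged sketch (not from the model card) that ranks several candidate snippets for one query using cosine similarity via sentence_transformers.util; the query and snippets below are illustrative placeholders.

```python
# A hedged variant of the example above: rank several candidate snippets
# for one query, scoring with cosine similarity instead of raw dot products.
# The query and candidate snippets are illustrative, not from the model card.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)

query = "Reverse a singly linked list"
candidates = [
    "def reverse_list(head):\n    prev = None\n    while head:\n        head.next, prev, head = prev, head, head.next\n    return prev",
    "def fact(n):\n    return 1 if n == 0 else n * fact(n - 1)",
    "def binary_search(a, x):\n    lo, hi = 0, len(a)\n    ...",
]

# Queries use the "query" prompt; documents are encoded without it.
q_emb = model.encode([query], prompt_name="query")
d_emb = model.encode(candidates)

# Cosine similarity matrix of shape (1, len(candidates)); take the first row.
sims = util.cos_sim(q_emb, d_emb)[0]

# Print candidates from most to least similar.
for idx in sims.argsort(descending=True):
    print(f"{sims[idx]:.3f}", candidates[idx][:60].replace("\n", " "))
```

Cosine similarity and the dot product used in the first example agree up to normalization; if the model emits L2-normalized embeddings (common for retrieval models), the two rankings coincide.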
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("Salesforce/SweRankEmbed-Small", trust_remote_code=True)
queries = ['Calculate the n-th factorial']
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
query_embeddings = model.encode(queries, prompt_name="query")
document_embeddings = model.encode(documents)
scores = query_embeddings @ document_embeddings.T
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
# Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Sentence-Transformerspython
Usage with Huggingface Transformers (Python)
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()

query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)

documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)

# Compute token embeddings (CLS token of the last hidden state)
with torch.no_grad():
    query_embeddings = model(**query_tokens)[0][:, 0]
    document_embeddings = model(**document_tokens)[0][:, 0]

# Normalize embeddings so the dot product below is cosine similarity
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)

scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
    doc_score_pairs = list(zip(documents, query_scores))
    doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
    # Output passages & scores
    print("Query:", query)
    for document, score in doc_score_pairs:
        print(score, document)
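Encoding one document at a time on CPU is fine for a demo, but for a real corpus you would typically move the model to a GPU and encode in batches. Below is a minimal sketch under those assumptions; the encode_in_batches helper and its batch_size are illustrative choices, not part of the model card:

import torch
from transformers import AutoModel, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("Salesforce/SweRankEmbed-Small")
model = AutoModel.from_pretrained("Salesforce/SweRankEmbed-Small", add_pooling_layer=False).to(device)
model.eval()

def encode_in_batches(texts, batch_size=32):
    # Tokenize and encode texts in fixed-size batches, collecting
    # L2-normalized CLS embeddings on the selected device.
    chunks = []
    for start in range(0, len(texts), batch_size):
        tokens = tokenizer(
            texts[start:start + batch_size],
            padding=True, truncation=True, max_length=512, return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            cls = model(**tokens)[0][:, 0]  # CLS-token embedding
        chunks.append(torch.nn.functional.normalize(cls, p=2, dim=1))
    return torch.cat(chunks, dim=0)

The returned matrix can be scored against query embeddings with torch.mm exactly as in the example above.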
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('Salesforce/SweRankEmbed-Small')
model = AutoModel.from_pretrained('Salesforce/SweRankEmbed-Small', add_pooling_layer=False)
model.eval()
query_prefix = 'Represent this query for searching relevant code: '
queries = ['Calculate the n-th factorial']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)
documents = ['def fact(n):\n if n < 0:\n raise ValueError\n return 1 if n == 0 else n * fact(n - 1)']
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors='pt', max_length=512)
# Compute token embeddings
with torch.no_grad():
query_embeddings = model(**query_tokens)[0][:, 0]
document_embeddings = model(**document_tokens)[0][:, 0]
# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
document_embeddings = torch.nn.functional.normalize(document_embeddings, p=2, dim=1)
scores = torch.mm(query_embeddings, document_embeddings.transpose(0, 1))
for query, query_scores in zip(queries, scores):
doc_score_pairs = list(zip(documents, query_scores))
doc_score_pairs = sorted(doc_score_pairs, key=lambda x: x[1], reverse=True)
#Output passages & scores
print("Query:", query)
for document, score in doc_score_pairs:
print(score, document)Usage with Huggingface Transformerspythontransformers