jina-code-embeddings-0.5b
13.5K
16
license:cc-by-nc-4.0
by
jinaai
Embedding Model
OTHER
0.5B params
Fair
14K downloads
Community-tested
Edge AI:
Mobile
Laptop
Server
2GB+ RAM
Mobile
Laptop
Server
Quick Summary
Code embedding model for natural-language-to-code retrieval: it embeds queries and code snippets into a shared vector space for similarity search.
Device Compatibility
Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
1GB+ RAM
Code Examples
# Usage example for jinaai/jina-code-embeddings-0.5b.
# (Deduplicated: the scraped page repeated this identical snippet ~30 times,
# with copy-paste artifacts gluing the output of one copy to the header of
# the next. A single clean copy is kept below.)
#
# Requirements:
#   pip install "sentence_transformers>=5.0.0" "torch>=2.7.1"
import torch
from sentence_transformers import SentenceTransformer

# Load the model.
# NOTE(review): "flash_attention_2" and device_map="cuda" require a CUDA GPU
# (and the flash-attn package installed); drop these kwargs to run on CPU.
model = SentenceTransformer(
    "jinaai/jina-code-embeddings-0.5b",
    model_kwargs={
        "torch_dtype": torch.bfloat16,
        "attn_implementation": "flash_attention_2",
        "device_map": "cuda",
    },
    # Left padding — presumably so the final-token representation is aligned
    # across a batch; keep as shipped in the model card.
    tokenizer_kwargs={"padding_side": "left"},
)

# The queries and documents to embed. Natural-language queries use the
# "nl2code_query" prompt; code documents use the "nl2code_document" prompt.
queries = [
    "print hello world in python",
    "initialize array of 5 zeros in c++",
]
documents = [
    "print('Hello World!')",
    "int arr[5] = {0, 0, 0, 0, 0};",
]

query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")

# Compute the (cosine) similarity between the query and document embeddings.
# Row i / column j is the similarity of queries[i] to documents[j]; the
# diagonal (matching pairs) should dominate.
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# Expected output (values approximate; exact numbers vary with hardware/dtype):
# tensor([[0.8169, 0.1214],
#         [0.1190, 0.5500]])
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# NOTE(review): this region originally contained the exact same example
# duplicated ~37 times back-to-back (a scrape/paste artifact); each copy's
# trailing output comment was fused onto the next copy's first line
# ("...0.5500]])tensor([[0.8168, 0.1236],pythonpytorch"). A single clean
# copy is kept below; the example's behavior is unchanged.

# Example: natural-language-to-code retrieval with jina-code-embeddings-0.5b.
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer

# Load the model.
# - bfloat16 + flash_attention_2 + device_map="cuda" require a CUDA GPU with
#   flash-attn installed; drop these kwargs for a CPU-only run.
# - padding_side="left" is required by this model's tokenizer config
#   (NOTE(review): presumably because pooling uses the final token position —
#   confirm against the model card).
model = SentenceTransformer(
    "jinaai/jina-code-embeddings-0.5b",
    model_kwargs={
        "torch_dtype": torch.bfloat16,
        "attn_implementation": "flash_attention_2",
        "device_map": "cuda"
    },
    tokenizer_kwargs={"padding_side": "left"},
)

# The queries and documents to embed.
queries = [
    "print hello world in python",
    "initialize array of 5 zeros in c++"
]
documents = [
    "print('Hello World!')",
    "int arr[5] = {0, 0, 0, 0, 0};"
]

# Queries and documents use distinct task-specific prompts: the model was
# trained with separate instruction prefixes for the two sides of
# natural-language-to-code retrieval.
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")

# Compute the (cosine) similarity between the query and document embeddings.
# Rows correspond to queries, columns to documents; the diagonal should
# dominate when each query matches its paired document.
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
#         [0.1190, 0.5500]])
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# [0.1190, 0.5500]])tensor([[0.8168, 0.1236],pythonpytorch
# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer
# Load the model
model = SentenceTransformer(
"jinaai/jina-code-embeddings-0.5b",
model_kwargs={
"torch_dtype": torch.bfloat16,
"attn_implementation": "flash_attention_2",
"device_map": "cuda"
},
tokenizer_kwargs={"padding_side": "left"},
)
# The queries and documents to embed
queries = [
"print hello world in python",
"initialize array of 5 zeros in c++"
]
documents = [
"print('Hello World!')",
"int arr[5] = {0, 0, 0, 0, 0};"
]
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")
# Compute the (cosine) similarity between the query and document embeddings
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# tensor([[0.8169, 0.1214],
# Usage example for jinaai/jina-code-embeddings-0.5b: embed natural-language
# queries and code snippets, then rank code by cosine similarity.
# (The original page accidentally repeated this identical script ~25 times,
# each copy prefixed with a garbled fragment of the previous copy's output;
# this is the single intended example.)

# !pip install sentence_transformers>=5.0.0 torch>=2.7.1
import torch
from sentence_transformers import SentenceTransformer

# Load the model.
# - bfloat16 + flash_attention_2 keep memory/latency low on a CUDA GPU.
# - padding_side="left" is required by this model's tokenizer so that the
#   last-token pooling sees real tokens, not padding.
model = SentenceTransformer(
    "jinaai/jina-code-embeddings-0.5b",
    model_kwargs={
        "torch_dtype": torch.bfloat16,
        "attn_implementation": "flash_attention_2",
        "device_map": "cuda",
    },
    tokenizer_kwargs={"padding_side": "left"},
)

# The queries and documents to embed.
queries = [
    "print hello world in python",
    "initialize array of 5 zeros in c++",
]
documents = [
    "print('Hello World!')",
    "int arr[5] = {0, 0, 0, 0, 0};",
]

# Queries and documents use different task-specific prompts (prompt_name)
# so the model embeds them into the shared natural-language↔code space.
query_embeddings = model.encode(queries, prompt_name="nl2code_query")
document_embeddings = model.encode(documents, prompt_name="nl2code_document")

# Compute the (cosine) similarity between the query and document embeddings.
# Row i, column j = similarity of query i to document j; the diagonal is
# expected to dominate (each query matches its own snippet).
similarity = model.similarity(query_embeddings, document_embeddings)
print(similarity)
# Expected output (values may vary slightly by hardware/dtype):
# tensor([[0.8169, 0.1214],
#         [0.1190, 0.5500]])
# tensor([[0.8169, 0.1214],
#         [0.1190, 0.5500]])

Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API

Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.