distilbert-disease-specialist-recommendation
3
5
—
by
AventIQ-AI
Other
OTHER
New
3 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary
Model Overview This is a Zero-shot Classification Model designed to classify the appropriate medical department or specialist a patient should consult based on their symptoms.
Code Examples
How to Use the Model
# Dependencies for the zero-shot specialist-recommendation example.
# (The scraped page repeated this import block many times and fused
# section headings onto the last line; a single clean copy suffices.)
import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True)
embedding = outputs.hidden_states[-1].mean(dim=1).squeeze(0).cpu().numpy()
return embedding
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
text_embeddings = np.array([encode_text(text) for text in texts])
similarities = cosine_similarity(text_embeddings, candidate_embeddings)
ranked_results = [
sorted(zip(candidate_labels, similarity), key=lambda x: x[1], reverse=True)
for similarity in similarities
]
return ranked_results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# EXPECTED OUTPUT : [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]**Loading The Model**text
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
    """Embed *text* as the attention-mask-weighted mean of the final hidden states.

    Fix over the original: the original averaged over every position of a
    ``padding="max_length"`` sequence, so [PAD] token states dominated the
    embedding for short inputs. Here padded positions are excluded from the mean.

    Args:
        text: A single string to embed.

    Returns:
        A 1-D numpy float array of shape (hidden_size,) on the CPU.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)
    hidden = outputs.hidden_states[-1]  # (1, seq_len, hidden_size)
    mask = inputs["attention_mask"].unsqueeze(-1).to(hidden.dtype)  # (1, seq_len, 1)
    # Sum only the real-token states and divide by the real-token count.
    summed = (hidden * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1)  # guard against divide-by-zero
    embedding = (summed / counts).squeeze(0).cpu().numpy()
    return embedding
# Candidate specialist departments to rank for each symptom description.
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
# Embed each label once up front; reused for every classification call.
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
    """Rank the candidate specialist labels for each input text.

    Every text is embedded and compared (cosine similarity) against the
    precomputed label embeddings; labels are returned best-first.

    Args:
        texts: Iterable of symptom-description strings.

    Returns:
        One list per input text of ``(label, score)`` pairs, sorted by
        descending similarity.
    """
    embedded = np.array([encode_text(t) for t in texts])
    score_matrix = cosine_similarity(embedded, candidate_embeddings)
    results = []
    for row in score_matrix:
        pairs = list(zip(candidate_labels, row))
        pairs.sort(key=lambda pair: pair[1], reverse=True)
        results.append(pairs)
    return results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# Output reported by the model card:
# [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]

# --- Loading the model and tokenizer from the Hugging Face Hub ---
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
# Prefer GPU when available; fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
    """Embed *text* as the attention-mask-weighted mean of the final hidden states.

    Fix over the original: the original averaged over every position of a
    ``padding="max_length"`` sequence, so [PAD] token states dominated the
    embedding for short inputs. Here padded positions are excluded from the mean.

    Args:
        text: A single string to embed.

    Returns:
        A 1-D numpy float array of shape (hidden_size,) on the CPU.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)
    hidden = outputs.hidden_states[-1]  # (1, seq_len, hidden_size)
    mask = inputs["attention_mask"].unsqueeze(-1).to(hidden.dtype)  # (1, seq_len, 1)
    # Sum only the real-token states and divide by the real-token count.
    summed = (hidden * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1)  # guard against divide-by-zero
    embedding = (summed / counts).squeeze(0).cpu().numpy()
    return embedding
# Candidate specialist departments to rank for each symptom description.
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
# Embed each label once up front; reused for every classification call.
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
    """Rank the candidate specialist labels for each input text.

    Every text is embedded and compared (cosine similarity) against the
    precomputed label embeddings; labels are returned best-first.

    Args:
        texts: Iterable of symptom-description strings.

    Returns:
        One list per input text of ``(label, score)`` pairs, sorted by
        descending similarity.
    """
    embedded = np.array([encode_text(t) for t in texts])
    score_matrix = cosine_similarity(embedded, candidate_embeddings)
    results = []
    for row in score_matrix:
        pairs = list(zip(candidate_labels, row))
        pairs.sort(key=lambda pair: pair[1], reverse=True)
        results.append(pairs)
    return results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# Output reported by the model card:
# [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]

# --- Loading the model and tokenizer from the Hugging Face Hub ---
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
# Prefer GPU when available; fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
    """Embed *text* as the attention-mask-weighted mean of the final hidden states.

    Fix over the original: the original averaged over every position of a
    ``padding="max_length"`` sequence, so [PAD] token states dominated the
    embedding for short inputs. Here padded positions are excluded from the mean.

    Args:
        text: A single string to embed.

    Returns:
        A 1-D numpy float array of shape (hidden_size,) on the CPU.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)
    hidden = outputs.hidden_states[-1]  # (1, seq_len, hidden_size)
    mask = inputs["attention_mask"].unsqueeze(-1).to(hidden.dtype)  # (1, seq_len, 1)
    # Sum only the real-token states and divide by the real-token count.
    summed = (hidden * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1)  # guard against divide-by-zero
    embedding = (summed / counts).squeeze(0).cpu().numpy()
    return embedding
# Candidate specialist departments to rank for each symptom description.
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
# Embed each label once up front; reused for every classification call.
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
    """Rank the candidate specialist labels for each input text.

    Every text is embedded and compared (cosine similarity) against the
    precomputed label embeddings; labels are returned best-first.

    Args:
        texts: Iterable of symptom-description strings.

    Returns:
        One list per input text of ``(label, score)`` pairs, sorted by
        descending similarity.
    """
    embedded = np.array([encode_text(t) for t in texts])
    score_matrix = cosine_similarity(embedded, candidate_embeddings)
    results = []
    for row in score_matrix:
        pairs = list(zip(candidate_labels, row))
        pairs.sort(key=lambda pair: pair[1], reverse=True)
        results.append(pairs)
    return results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# Output reported by the model card:
# [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]

# --- Loading the model and tokenizer from the Hugging Face Hub ---
model = AutoModelForSequenceClassification.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
tokenizer = AutoTokenizer.from_pretrained("AventIQ-AI/distilbert-disease-specialist-recommendation")
# Prefer GPU when available; fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
def encode_text(text):
    """Embed *text* as the attention-mask-weighted mean of the final hidden states.

    Fix over the original: the original averaged over every position of a
    ``padding="max_length"`` sequence, so [PAD] token states dominated the
    embedding for short inputs. Here padded positions are excluded from the mean.

    Args:
        text: A single string to embed.

    Returns:
        A 1-D numpy float array of shape (hidden_size,) on the CPU.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding="max_length").to(device)
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)
    hidden = outputs.hidden_states[-1]  # (1, seq_len, hidden_size)
    mask = inputs["attention_mask"].unsqueeze(-1).to(hidden.dtype)  # (1, seq_len, 1)
    # Sum only the real-token states and divide by the real-token count.
    summed = (hidden * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1)  # guard against divide-by-zero
    embedding = (summed / counts).squeeze(0).cpu().numpy()
    return embedding
# Candidate specialist departments to rank for each symptom description.
candidate_labels = ["Cardiology", "Neurology", "Orthopedics", "Dermatology"]
# Embed each label once up front; reused for every classification call.
candidate_embeddings = np.array([encode_text(label) for label in candidate_labels])
def zero_shot_classification_batch(texts):
    """Rank the candidate specialist labels for each input text.

    Every text is embedded and compared (cosine similarity) against the
    precomputed label embeddings; labels are returned best-first.

    Args:
        texts: Iterable of symptom-description strings.

    Returns:
        One list per input text of ``(label, score)`` pairs, sorted by
        descending similarity.
    """
    embedded = np.array([encode_text(t) for t in texts])
    score_matrix = cosine_similarity(embedded, candidate_embeddings)
    results = []
    for row in score_matrix:
        pairs = list(zip(candidate_labels, row))
        pairs.sort(key=lambda pair: pair[1], reverse=True)
        results.append(pairs)
    return results
print(zero_shot_classification_batch(["Suffering from High Fever and body Itching"]))
# Output reported by the model card:
# [[('Dermatology', 0.55948263), ('Orthopedics', 0.29858905), ('Cardiology', 0.25098807), ('Neurology', 0.17517035)]]
# Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API
Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now
Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.