mbert_LusakaLang_MultiTask
47
license:apache-2.0
by
Kelvinmbewe
Embedding Model
OTHER
New
47 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary
Multi-task mBERT-based model for Zambian-language text: language identification, sentiment analysis, and topic classification, each served by a dedicated fine-tuned classification head.
Code Examples
**How to Use This Model** (Python, using the `transformers` library)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import torch.nn.functional as F
class LusakaLangMultiTask:
    """Multi-task text classifier for Zambian-language text.

    Wraps three fine-tuned mBERT sequence-classification heads sharing one
    tokenizer:

    - Language identification
    - Sentiment analysis
    - Topic classification

    Each head has its own temperature for confidence calibration
    (temperature > 1 softens probabilities, < 1 sharpens them).
    """

    def __init__(self, path="Kelvinmbewe/LusakaLang-MultiTask",
                 lang_temp=1.0, sent_temp=1.0, topic_temp=1.0):
        """Load tokenizer and the three classification models.

        Args:
            path: Hub repo providing the shared tokenizer.
            lang_temp: Temperature applied to language-ID logits.
            sent_temp: Temperature applied to sentiment logits.
            topic_temp: Temperature applied to topic logits.
        """
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.lang_model = AutoModelForSequenceClassification.from_pretrained(
            "Kelvinmbewe/mbert_Lusaka_Language_Analysis"
        )
        self.sent_model = AutoModelForSequenceClassification.from_pretrained(
            "Kelvinmbewe/mbert_LusakaLang_Sentiment_Analysis"
        )
        self.topic_model = AutoModelForSequenceClassification.from_pretrained(
            "Kelvinmbewe/mbert_LusakaLang_Topic"
        )

        # Map predicted class indices back to human-readable labels,
        # as declared in each model's config.
        self.lang_id2label = self.lang_model.config.id2label
        self.sent_id2label = self.sent_model.config.id2label
        self.topic_id2label = self.topic_model.config.id2label

        # Per-head temperature scaling factors.
        self.lang_temp = lang_temp
        self.sent_temp = sent_temp
        self.topic_temp = topic_temp

        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        for model in (self.lang_model, self.sent_model, self.topic_model):
            model.to(self.device)
            # Fix: switch to inference mode. Without .eval(), dropout stays
            # active and predictions are nondeterministic between calls.
            model.eval()

    def _head_predictions(self, model, inputs, temperature):
        """Run one classification head on a tokenized batch.

        Returns:
            (confidences, class_indices): 1-D tensors of the max softmax
            probability and its class index per batch item.
        """
        logits = model(**inputs).logits / temperature
        probs = F.softmax(logits, dim=-1)
        return torch.max(probs, dim=-1)

    def predict_batch(self, texts, conf_threshold=0.5, batch_size=16):
        """Predict language, sentiment, and topic for a list of texts.

        Args:
            texts: List of input strings (an empty list yields []).
            conf_threshold: Minimum confidence for the language and topic
                labels; below it the label is reported as "unknown".
            batch_size: Number of texts tokenized and scored per forward pass.

        Returns:
            List of dicts, one per input, with keys: "text", "language",
            "language_conf", "sentiment", "sentiment_conf", "topic",
            "topic_conf". Confidences are rounded to 3 decimal places.
        """
        results = []
        for start in range(0, len(texts), batch_size):
            batch_texts = texts[start:start + batch_size]
            inputs = self.tokenizer(
                batch_texts,
                return_tensors="pt",
                truncation=True,
                padding=True,
            ).to(self.device)

            # No gradients needed for pure inference.
            with torch.no_grad():
                lang_conf, lang_idx = self._head_predictions(
                    self.lang_model, inputs, self.lang_temp
                )
                sent_conf, sent_idx = self._head_predictions(
                    self.sent_model, inputs, self.sent_temp
                )
                topic_conf, topic_idx = self._head_predictions(
                    self.topic_model, inputs, self.topic_temp
                )

            for j, text in enumerate(batch_texts):
                results.append({
                    "text": text,
                    # Language and topic fall back to "unknown" below the
                    # threshold. NOTE(review): sentiment always reports its
                    # top label regardless of confidence — confirm this
                    # asymmetry is intentional.
                    "language": self.lang_id2label[lang_idx[j].item()]
                    if lang_conf[j].item() >= conf_threshold else "unknown",
                    "language_conf": round(lang_conf[j].item(), 3),
                    "sentiment": self.sent_id2label[sent_idx[j].item()],
                    "sentiment_conf": round(sent_conf[j].item(), 3),
                    "topic": self.topic_id2label[topic_idx[j].item()]
                    if topic_conf[j].item() >= conf_threshold else "unknown",
                    "topic_conf": round(topic_conf[j].item(), 3)
                })
        return results
# ================= Example Usage =================
# Instantiate with per-head temperature scaling (1.0 = no scaling); the
# values below are illustrative calibration settings.
llm = LusakaLangMultiTask(lang_temp=1.2, sent_temp=0.93, topic_temp=1.5)
# Mixed English / local-language complaint texts about a ride service.
samples = [
"Driver was rude, shouting all the way",
"Payment failed, money deducted but no ride",
"Support did not reply to my complaint",
"Umudriver alisala sana, alelanda ifintu ifipusa",
]
# Language/topic labels with confidence below 0.5 are reported as "unknown".
predictions = llm.predict_batch(samples, conf_threshold=0.5)
# Print one block per input: text, then each head's label and confidence.
for p in predictions:
print(f"TEXT: {p['text']}")
print(f"  Language : {p['language']} (conf={p['language_conf']})")
print(f"  Sentiment: {p['sentiment']} (conf={p['sentiment_conf']})")
print(f"  Topic    : {p['topic']} (conf={p['topic_conf']})\n")Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API
Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now
Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.