banglabert-covid-sentiment-fakenews

2
1 language
license:apache-2.0
by
ahs95
Other
OTHER
19B params
New
2 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
43GB+ RAM
Mobile
Laptop
Server
Quick Summary

BanglaBERT-based multitask classifier for Bangla COVID-19 text: predicts sentiment (negative / neutral / positive) and truthfulness (fake / real).

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
18GB+ RAM

Code Examples

How to Use (Python, `transformers`)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model from the Hugging Face Hub.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification has a SINGLE
# classification head (one `logits` tensor). A dual-head checkpoint normally
# requires a custom model class or `trust_remote_code=True` — confirm against
# the model repository before relying on `outputs[1]` below.

def predict(text):
    """Classify a Bangla string on two axes: sentiment and truthfulness.

    Args:
        text: Input text; tokenized with truncation/padding to 512 tokens.

    Returns:
        dict with keys:
            "sentiment": one of "negative" / "neutral" / "positive",
            "truthfulness": one of "fake" / "real",
            "sentiment_confidence": list of 3 softmax probabilities,
            "truthfulness_confidence": list of 2 softmax probabilities.

    Raises:
        RuntimeError: if the loaded model exposes only a single head
            (i.e. it is not the expected dual-head checkpoint).
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model is expected to return (sentiment_logits, truthfulness_logits).
        # NOTE(review): assumes a custom dual-head checkpoint; a stock
        # single-head model would make outputs[1] something other than a
        # second logits tensor — confirm against the model repo.
        sent_logits = outputs[0]  # Shape: [1, 3]
        try:
            truth_logits = outputs[1]  # Shape: [1, 2]
        except IndexError:
            raise RuntimeError(
                "Model returned a single output head; the dual-head "
                "checkpoint (sentiment + truthfulness) is required."
            )

        # Softmax once per head; derive both the confidence vector and the
        # argmax prediction from the same probabilities (avoids a second
        # pass over the logits).
        sent_probs = torch.softmax(sent_logits, dim=-1)[0]
        truth_probs = torch.softmax(truth_logits, dim=-1)[0]

        # Map class ids to labels (index == class id).
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[int(sent_probs.argmax())],
            "truthfulness": truth_labels[int(truth_probs.argmax())],
            "sentiment_confidence": sent_probs.tolist(),
            "truthfulness_confidence": truth_probs.tolist()
        }

# Example usage: a positive Bangla sentence praising government COVID measures.
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Expected output (confidence lists elided):
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    """Run both classification heads on *text*.

    Returns a dict containing the predicted sentiment and truthfulness
    labels together with each head's full softmax distribution.
    """
    encoded = tokenizer(text, return_tensors="pt", truncation=True,
                        padding=True, max_length=512)

    with torch.no_grad():
        head_outputs = model(**encoded)
        # head_outputs is (sentiment_logits [1, 3], truthfulness_logits [1, 2]).
        results = {}
        for head, logits, labels in (
            ("sentiment", head_outputs[0], ["negative", "neutral", "positive"]),
            ("truthfulness", head_outputs[1], ["fake", "real"]),
        ):
            choice = torch.argmax(logits, dim=-1).item()
            results[head] = labels[choice]
            # tolist()[0] strips the batch dimension.
            results[head + "_confidence"] = torch.softmax(logits, dim=-1).tolist()[0]

        return {
            "sentiment": results["sentiment"],
            "truthfulness": results["truthfulness"],
            "sentiment_confidence": results["sentiment_confidence"],
            "truthfulness_confidence": results["truthfulness_confidence"],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
## How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    """Classify a Bangla string with both heads of the multitask model.

    Returns a dict with the predicted sentiment / truthfulness labels plus
    the full softmax distribution ("confidence") for each head.
    """
    # Tokenize to PyTorch tensors; input is truncated at 512 tokens.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        # — assumes the custom forward() emits raw logits in this order;
        # TODO confirm against the model class.
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels; order must match the heads' training label order.
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            # tolist()[0] drops the batch dimension of the [1, C] tensor.
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the tokenizer and the fine-tuned multitask checkpoint.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The checkpoint exposes two classification heads, so the outputs are handled
# manually: head 0 scores sentiment (3 classes), head 1 truthfulness (2).

def predict(text):
    """Return sentiment and fake/real labels for *text*, with confidences."""
    # Label tables indexed by head: 0 = sentiment, 1 = truthfulness.
    labels_by_head = (["negative", "neutral", "positive"], ["fake", "real"])

    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        head_logits = model(**encoded)
        # (sentiment_logits, truthfulness_logits) per the model card.
        sentiment_scores = head_logits[0]  # [1, 3]
        truth_scores = head_logits[1]      # [1, 2]

        prediction = {
            "sentiment": labels_by_head[0][torch.argmax(sentiment_scores, dim=-1).item()],
            "truthfulness": labels_by_head[1][torch.argmax(truth_scores, dim=-1).item()],
            "sentiment_confidence": torch.softmax(sentiment_scores, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_scores, dim=-1).tolist()[0]
        }
    return prediction

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model.
# NOTE(review): a stock AutoModelForSequenceClassification has a single
# classification head and returns one `logits` tensor, yet the code below
# indexes two logit tensors. This checkpoint therefore appears to ship a
# custom dual-head architecture; loading it likely requires
# `trust_remote_code=True` — confirm against the model repository.
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# The label maps are fixed properties of the checkpoint: define them once at
# module level instead of rebuilding two lists on every predict() call.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify one Bangla text for sentiment and truthfulness.

    Args:
        text: Input string; tokenized and truncated to 512 tokens.

    Returns:
        dict with:
            "sentiment": "negative" | "neutral" | "positive"
            "truthfulness": "fake" | "real"
            "sentiment_confidence": 3 softmax probabilities (list of float)
            "truthfulness_confidence": 2 softmax probabilities (list of float)
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    # argmax over the class dimension gives the predicted label id.
    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    return {
        "sentiment": SENTIMENT_LABELS[sent_pred],
        "truthfulness": TRUTH_LABELS[truth_pred],
        # Index the single batch row first, then convert — avoids building a
        # nested Python list just to take its first element.
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1)[0].tolist(),
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1)[0].tolist(),
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python · transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    """Run both classification heads on `text`; return labels and confidences."""
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        head_outputs = model(**encoded)
        # Head order in the output tuple: (sentiment, truthfulness).
        sentiment_logits = head_outputs[0]      # [1, 3]
        truthfulness_logits = head_outputs[1]   # [1, 2]

        sentiment_id = torch.argmax(sentiment_logits, dim=-1).item()
        truthfulness_id = torch.argmax(truthfulness_logits, dim=-1).item()

        # Per-task id -> label tables.
        id_to_sentiment = ["negative", "neutral", "positive"]
        id_to_truth = ["fake", "real"]

        return {
            "sentiment": id_to_sentiment[sentiment_id],
            "truthfulness": id_to_truth[truthfulness_id],
            "sentiment_confidence": torch.softmax(sentiment_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truthfulness_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification has a single head;
# the two-tensor tuple below relies on this specific custom checkpoint — confirm.

def predict(text):
    """Classify Bangla `text` with both heads.

    Returns a dict with a predicted label per head ('sentiment',
    'truthfulness') and the corresponding softmax confidence vectors.
    """
    # Truncate/pad to the encoder's 512-token limit; batch of size 1.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        # .item() extracts the single predicted class id from the batch-of-1 result.
        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        # (ordering presumably matches the training label ids — verify against the checkpoint)
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            # [0] strips the batch dimension so the confidences are flat lists.
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).
# NOTE(review): a stock AutoModelForSequenceClassification returns a single
# `logits` tensor, not a 2-tuple of heads; the indexing below presumably relies
# on custom model code shipped with this checkpoint — confirm (it may need
# trust_remote_code=True or the repo's own model class).

def predict(text):
    """Classify one Bangla string for sentiment and truthfulness.

    Args:
        text: Input string; tokenized with truncation to 512 tokens.

    Returns:
        dict with "sentiment" ("negative"/"neutral"/"positive"),
        "truthfulness" ("fake"/"real"), and per-class softmax
        probability lists under "sentiment_confidence" and
        "truthfulness_confidence".
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Expected: (sentiment_logits, truthfulness_logits) — see note above.
        sent_logits = outputs[0]   # Shape: [1, 3]
        truth_logits = outputs[1]  # Shape: [1, 2]

    sent_pred = torch.argmax(sent_logits, dim=-1).item()
    truth_pred = torch.argmax(truth_logits, dim=-1).item()

    # Map class ids to human-readable labels.
    sentiment_labels = ["negative", "neutral", "positive"]
    truth_labels = ["fake", "real"]

    return {
        "sentiment": sentiment_labels[sent_pred],
        "truthfulness": truth_labels[truth_pred],
        "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
        "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
    }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
How to Usepythontransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# For inference, you need to handle the dual-head output manually.
# This model returns two logits tensors: one for sentiment (3 classes), one for truthfulness (2 classes).

def predict(text):
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # The model returns a tuple: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]  # Shape: [1, 3]
        truth_logits = outputs[1] # Shape: [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        # Map IDs to labels
        sentiment_labels = ["negative", "neutral", "positive"]
        truth_labels = ["fake", "real"]

        return {
            "sentiment": sentiment_labels[sent_pred],
            "truthfulness": truth_labels[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0]
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}
# How to Use (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load tokenizer and model
model_name = "your-hf-username/banglabert-multitask-covid-sa-fake"  # Replace with your model ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout so inference is deterministic

# For inference, you need to handle the dual-head output manually.
# NOTE(review): this assumes a custom dual-head checkpoint that returns two
# logits tensors -- sentiment (3 classes) and truthfulness (2 classes).
# A stock AutoModelForSequenceClassification exposes a single `logits` head;
# confirm the checkpoint's architecture before relying on outputs[1].

# Label maps for the two heads, built once at module level.
SENTIMENT_LABELS = ["negative", "neutral", "positive"]
TRUTH_LABELS = ["fake", "real"]

def predict(text):
    """Classify Bangla *text* for sentiment (3-way) and truthfulness (2-way).

    Returns a dict with the predicted labels plus per-class softmax
    confidence lists for both heads.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        # Assumed tuple layout: (sentiment_logits, truthfulness_logits)
        sent_logits = outputs[0]   # shape [1, 3]
        truth_logits = outputs[1]  # shape [1, 2]

        sent_pred = torch.argmax(sent_logits, dim=-1).item()
        truth_pred = torch.argmax(truth_logits, dim=-1).item()

        return {
            "sentiment": SENTIMENT_LABELS[sent_pred],
            "truthfulness": TRUTH_LABELS[truth_pred],
            "sentiment_confidence": torch.softmax(sent_logits, dim=-1).tolist()[0],
            "truthfulness_confidence": torch.softmax(truth_logits, dim=-1).tolist()[0],
        }

# Example usage
text = "করোনা ভাইরাস নিয়ে সরকারের পদক্ষেপ অত্যন্ত প্রশংসনীয়।"
result = predict(text)
print(result)
# Output: {'sentiment': 'positive', 'truthfulness': 'real', ...}

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.