SMS-Spam-Detection-Model

33
by
AventIQ-AI
Other
OTHER
New
33 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary

BERT-based SMS spam detection model: given the text of an SMS message, it classifies the message as "spam" or "ham".

Code Examples

Inference (Python, transformers)
# SMS spam detection inference with a fine-tuned BERT checkpoint.
#
# NOTE(review): the original snippet (duplicated many times on the page) loaded
# BertForTokenClassification, yet spam detection is a sentence-level task and
# the snippet's own `torch.argmax(logits, dim=1).item()` assumes logits of
# shape (batch, num_labels) — i.e. a sequence-classification head. On
# token-classification logits (batch, seq_len, num_labels) that `.item()`
# call would raise. BertForSequenceClassification matches the intended usage;
# confirm against the checkpoint's config if loading warns about head weights.
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Load tokenizer and model once (the original page repeated this load dozens
# of times; one load is sufficient and avoids redundant downloads).
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()  # disable dropout etc. for deterministic inference

# Inference: prefer GPU when available, otherwise run on CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    """Classify a single SMS message as spam or ham.

    Args:
        text: Raw SMS text. Tokenized with truncation to 128 tokens.

    Returns:
        The string "spam" if the model predicts label 1, otherwise "ham".
        (Label convention 1 -> spam, 0 -> ham is assumed from the original
        snippet — verify against the model's id2label config.)
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits  # expected shape: (1, num_labels)
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForTokenClassification
from transformers import pipeline
import torch

model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
model = BertForTokenClassification.from_pretrained(model_name)
model.eval()


# Inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

def predict_sms(text):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted = torch.argmax(logits, dim=1).item()
    return "spam" if predicted == 1 else "ham"

# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inferencepythontransformers
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))
Inference example (Python, transformers)
from transformers import BertTokenizerFast, BertForSequenceClassification
import torch

# Binary SMS spam classifier: loads the published checkpoint once at import
# time, then classifies individual messages as "spam" or "ham".
model_name = "AventIQ-AI/SMS-Spam-Detection-Model"
tokenizer = BertTokenizerFast.from_pretrained(model_name)
# NOTE(review): spam detection is a whole-message (sequence-level) task, so the
# sequence-classification head is the correct one. The original snippet used
# BertForTokenClassification, whose logits have shape (batch, seq_len, labels)
# and make the .item() call below raise on a multi-element tensor.
model = BertForSequenceClassification.from_pretrained(model_name)
model.eval()

# Run on GPU when one is available; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)


def predict_sms(text: str) -> str:
    """Classify a single SMS message.

    Args:
        text: Raw SMS text (truncated to 128 tokens).

    Returns:
        "spam" if the model predicts class index 1, otherwise "ham".
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits  # shape: (1, num_labels)
        # argmax over the label axis yields a single scalar per message.
        predicted = torch.argmax(logits, dim=-1).item()
    return "spam" if predicted == 1 else "ham"


# Test example
print(predict_sms("You've won $1,000,000! Call now to claim your prize!"))

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.