Sentence-ALDi

19.1K
3
—
by
AMR-KELEG
Other
OTHER
Fair
19K downloads
Community-tested
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary

[ALDi on GitHub](https://github.com/AMR-KELEG/ALDi) · [ALDi demo Space](https://huggingface.co/spaces/AMR-KELEG/ALDi) — A BERT-based model fine-tuned to estimate the Arabic Level of Dialectness (ALDi) of a sentence, a continuous score from 0 (Modern Standard Arabic) to 1 (fully dialectal).

Code Examples

Usage (Python, transformers)
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
# Inference only: switch off dropout so repeated calls give deterministic scores.
model.eval()

def compute_score(sentence):
  """Estimate the Arabic Level of Dialectness (ALDi) of *sentence*.

  Returns a float clipped to the [0, 1] range: 0 ~ Modern Standard
  Arabic, 1 ~ fully dialectal.
  """
  # truncation=True guards against inputs longer than the model's
  # maximum sequence length (512 tokens for BERT), which would
  # otherwise raise at the forward pass.
  inputs = tokenizer(sentence, return_tensors="pt", truncation=True)
  # No gradients are needed at inference time; this saves memory and time.
  with torch.no_grad():
    outputs = model(**inputs)
  logits = outputs.logits
  # The head regresses a single score; clamp it to the valid range.
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"   # MSA phrasing -> expected score ~0
  s2 = "الجو حلو النهاردة"  # Egyptian Arabic phrasing -> expected score ~0.951

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  """Return the ALDi score of *sentence*, clipped to the [0, 1] range."""
  encoded = tokenizer(sentence, return_tensors="pt")
  raw = model(**encoded).logits[0][0].item()
  # Clamp the regressed value into the valid score range.
  if raw < 0:
    return 0
  if raw > 1:
    return 1
  return raw

if __name__ == "__main__":
  # First example is MSA (score ~0), second is Egyptian Arabic (score ~0.951).
  for example in ("الطقس جيد اليوم", "الجو حلو النهاردة"):
    print(example, round(compute_score(example), 3))
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
"""Estimate the Arabic Level of Dialectness (ALDi) of a sentence.

Uses the Sentence-ALDi regression model: a BERT-based sequence-
classification head whose single logit is interpreted as a dialectness
score, clipped to the range [0, 1] (0 = Modern Standard Arabic,
1 = fully dialectal).

NOTE(review): the scraped page repeated this exact snippet dozens of
times, redefining the same names (`model_name`, `tokenizer`, `model`,
`compute_score`) identically each time; a single copy is equivalent
and is kept here.
"""
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
# Inference-only usage: switch off dropout/batch-norm training behavior
# so repeated calls on the same sentence give deterministic scores.
model.eval()


def compute_score(sentence):
    """Return the ALDi score of *sentence* as a float clipped to [0, 1].

    Args:
        sentence: An Arabic sentence (plain ``str``).

    Returns:
        ``float`` in ``[0.0, 1.0]`` — the model's single regression
        logit, clamped to the valid score range.
    """
    import torch  # local import: only needed to disable autograd here

    inputs = tokenizer(sentence, return_tensors="pt")
    # No gradients are needed for scoring; skipping autograd bookkeeping
    # saves memory and time.
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    # Single regression output at [0][0]; the raw logit can stray
    # slightly outside [0, 1], so clamp it to the documented range.
    return min(max(0.0, logits[0][0].item()), 1.0)


if __name__ == "__main__":
    s1 = "الطقس جيد اليوم"
    s2 = "الجو حلو النهاردة"

    print(s1, round(compute_score(s1), 3))  # 0
    print(s2, round(compute_score(s2), 3))  # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

def compute_score(sentence):
  inputs = tokenizer(sentence, return_tensors="pt")
  outputs = model(**inputs)
  logits = outputs.logits
  return min(max(0, logits[0][0].item()), 1)

if __name__ == "__main__":
  s1 = "الطقس جيد اليوم"
  s2 = "الجو حلو النهاردة"

  print(s1, round(compute_score(s1), 3)) # 0
  print(s2, round(compute_score(s2), 3)) # 0.951
"""Estimate the Arabic Level of Dialectness (ALDi) of a sentence.

Loads AMR-KELEG/Sentence-ALDi, a BERT-based single-output regression
model that scores a sentence from 0 (Modern Standard Arabic) to
1 (highly dialectal).

NOTE(review): the original scrape repeated this script ~20 times and
interleaved bare ``Usagetexttransformers`` tokens that would raise
NameError when run; this is the deduplicated, runnable form.
"""
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "AMR-KELEG/Sentence-ALDi"

# Downloading the checkpoint requires network access on first run.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # inference only: disable dropout for deterministic scores


def compute_score(sentence):
    """Return the ALDi score of ``sentence``, clamped to [0, 1].

    Args:
        sentence: Arabic text to score.

    Returns:
        float in [0, 1]; the model's raw regression logit can fall
        slightly outside that range, so it is clipped.
    """
    inputs = tokenizer(sentence, return_tensors="pt")
    with torch.no_grad():  # no gradients needed at inference time
        outputs = model(**inputs)
    # Single-logit regression head: take the scalar and clamp it.
    return min(max(0, outputs.logits[0][0].item()), 1)


if __name__ == "__main__":
    s1 = "الطقس جيد اليوم"
    s2 = "الجو حلو النهاردة"

    print(s1, round(compute_score(s1), 3)) # 0
    print(s2, round(compute_score(s2), 3)) # 0.951

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.