Sentence-ALDi
19.1K
3
—
by
AMR-KELEG
Other
OTHER
Fair
19K downloads
Community-tested
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary
[GitHub](https://github.com/AMR-KELEG/ALDi) · [Demo](https://huggingface.co/spaces/AMR-KELEG/ALDi) — A BERT-based model fine-tuned to estimate the Arabic Level of Dialectness...
Code Examples
Usage (Python, transformers)
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Sentence-ALDi estimates the Arabic Level of Dialectness of a sentence as a
# score in [0, 1]: ~0 for Modern Standard Arabic, ~1 for highly dialectal text.
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)


def compute_score(sentence):
    """Return the ALDi score of *sentence*, clamped to [0, 1].

    The model exposes a single regression logit; raw values can fall
    slightly outside [0, 1], so the result is clamped to that range.
    """
    inputs = tokenizer(sentence, return_tensors="pt")
    with torch.no_grad():  # inference only — no gradient tracking needed
        outputs = model(**inputs)
    score = outputs.logits[0][0].item()
    return min(max(0.0, score), 1.0)


if __name__ == "__main__":
    s1 = "الطقس جيد اليوم"   # MSA: "The weather is good today" — TODO confirm translation
    s2 = "الجو حلو النهاردة"  # Egyptian Arabic: "The weather is nice today" — TODO confirm
    print(s1, round(compute_score(s1), 3))  # expected ~0
    print(s2, round(compute_score(s2), 3))  # expected ~0.951
# NOTE(review): the scraped page repeated the identical usage snippet ~39 more
# times; the byte-identical redefinitions are collapsed into this single clean
# copy (final module state is unchanged — every copy (re)bound the same names
# to the same values).
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Sentence-ALDi estimates the Arabic Level of Dialectness of a sentence as a
# score in [0, 1]: ~0 for Modern Standard Arabic, ~1 for highly dialectal text.
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)


def compute_score(sentence):
    """Return the ALDi score of *sentence*, clamped to [0, 1].

    The model exposes a single regression logit; raw values can fall
    slightly outside [0, 1], so the result is clamped to that range.
    """
    inputs = tokenizer(sentence, return_tensors="pt")
    with torch.no_grad():  # inference only — no gradient tracking needed
        outputs = model(**inputs)
    score = outputs.logits[0][0].item()
    return min(max(0.0, score), 1.0)


if __name__ == "__main__":
    s1 = "الطقس جيد اليوم"   # MSA: "The weather is good today" — TODO confirm translation
    s2 = "الجو حلو النهاردة"  # Egyptian Arabic: "The weather is nice today" — TODO confirm
    print(s1, round(compute_score(s1), 3))  # expected ~0
    print(s2, round(compute_score(s2), 3))  # expected ~0.951
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
# NOTE(review): the scraped page repeated the identical usage snippet dozens
# of times. Each copy re-imported transformers, re-downloaded/re-loaded the
# model and tokenizer, and redefined compute_score — pure duplication with
# expensive repeated side effects. Collapsed to a single clean copy; the
# module-level names (model_name, tokenizer, model, compute_score) end up
# bound exactly as before.
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)


def compute_score(sentence):
    """Return the ALDi score of *sentence*, clamped to the range [0, 1].

    The model has a single regression logit; the raw value can fall
    slightly outside the unit interval, so it is clipped before returning.
    """
    inputs = tokenizer(sentence, return_tensors="pt")
    outputs = model(**inputs)
    logits = outputs.logits
    # Single-logit regression head -> clamp to the documented [0, 1] range.
    return min(max(0, logits[0][0].item()), 1)


if __name__ == "__main__":
    s1 = "الطقس جيد اليوم"  # MSA: "The weather is good today."
    # Restored from scrape mojibake ("ØÙ„Ùˆ" = UTF-8 "حلو" read as Latin-1).
    s2 = "الجو حلو النهاردة"  # Egyptian Arabic: "The weather is nice today."
    print(s1, round(compute_score(s1), 3))  # expected ~0
    print(s2, round(compute_score(s2), 3))  # expected ~0.951
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
"""Estimate the Arabic Level of Dialectness (ALDi) of a sentence.

Scores come from the Sentence-ALDi model ("AMR-KELEG/Sentence-ALDi"),
a BERT-based regression model whose single logit is clamped to [0, 1]:
0 corresponds to Modern Standard Arabic, 1 to highly dialectal Arabic.
"""

from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)


def compute_score(sentence):
    """Return the ALDi score of *sentence* as a float clamped to [0.0, 1.0].

    Args:
        sentence: An Arabic sentence (str) to score.

    Returns:
        float: the model's single regression logit, clamped to [0, 1].
    """
    inputs = tokenizer(sentence, return_tensors="pt")
    outputs = model(**inputs)
    logits = outputs.logits
    # The model head is a single-output regressor; its raw logit may fall
    # slightly outside [0, 1], so clamp it to the valid score range.
    return min(max(0, logits[0][0].item()), 1)


if __name__ == "__main__":
    s1 = "الطقس جيد اليوم"  # MSA: "The weather is good today"
    s2 = "الجو حلو النهاردة"  # Egyptian Arabic: "The weather is nice today"
    print(s1, round(compute_score(s1), 3))  # expected ~0
    print(s2, round(compute_score(s2), 3))  # expected ~0.951
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
# NOTE(review): this region of the scraped page contained ~59 verbatim
# repetitions of the snippet above (the page's "Usage" example duplicated by
# the scraper, the last copy truncated). Each copy re-imported and re-defined
# exactly the same module-level names with the same values, so the duplicates
# have been removed; the module's final definitions are unchanged.
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Usagetexttransformers
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Sentence-ALDi: a BERT-based regressor that estimates the Arabic Level of
# Dialectness of a sentence (per the model card: 0 = MSA, ~1 = dialectal).
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)


def compute_score(sentence):
    """Return the ALDi score of *sentence*, clipped to the [0, 1] range."""
    inputs = tokenizer(sentence, return_tensors="pt")
    outputs = model(**inputs)
    logits = outputs.logits
    # Single regression head: take the scalar output and clamp it to [0, 1].
    return min(max(0, logits[0][0].item()), 1)


if __name__ == "__main__":
    s1 = "الطقس جيد اليوم"  # MSA — expected score ~0
    # NOTE(review): restored from mojibake ("ØÙ„Ùˆ" = UTF-8 "حلو"
    # mis-decoded as cp1252) on the scraped page.
    s2 = "الجو حلو النهاردة"  # Egyptian Arabic — expected score ~0.951
    print(s1, round(compute_score(s1), 3))  # 0
    print(s2, round(compute_score(s2), 3))  # 0.951
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Sentence-ALDi: a BERT-based regressor that estimates the Arabic Level of
# Dialectness of a sentence (per the model card: 0 = MSA, ~1 = dialectal).
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)


def compute_score(sentence):
    """Return the ALDi score of *sentence*, clipped to the [0, 1] range."""
    inputs = tokenizer(sentence, return_tensors="pt")
    outputs = model(**inputs)
    logits = outputs.logits
    # Single regression head: take the scalar output and clamp it to [0, 1].
    return min(max(0, logits[0][0].item()), 1)


if __name__ == "__main__":
    s1 = "الطقس جيد اليوم"  # MSA — expected score ~0
    # NOTE(review): restored from mojibake ("ØÙ„Ùˆ" = UTF-8 "حلو"
    # mis-decoded as cp1252) on the scraped page.
    s2 = "الجو حلو النهاردة"  # Egyptian Arabic — expected score ~0.951
    print(s1, round(compute_score(s1), 3))  # 0
    print(s2, round(compute_score(s2), 3))  # 0.951
from transformers import AutoTokenizer, AutoModelForSequenceClassification
model_name = "AMR-KELEG/Sentence-ALDi"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
def compute_score(sentence):
inputs = tokenizer(sentence, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
return min(max(0, logits[0][0].item()), 1)
if __name__ == "__main__":
s1 = "الطقس جيد اليوم"
s2 = "الجو ØÙ„Ùˆ النهاردة"
print(s1, round(compute_score(s1), 3)) # 0
print(s2, round(compute_score(s2), 3)) #Â 0.951Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API
Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now
Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.