Malaysian-Normalizer-Qwen3-8B
3
8.0B
2 languages
—
by
malaysia-ai
Other
OTHER
8B params
New
3 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
18GB+ RAM
Mobile
Laptop
Server
Quick Summary
Finetune of Qwen/Qwen3-8B on the mesolitica/Malaysian-Normalizer dataset — `text` is the string you want to normalize.
Device Compatibility
Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
8GB+ RAM
Code Examples
Example (Python, transformers):
from transformers import TextStreamer, AutoModelForCausalLM, AutoTokenizer

# Load the finetuned normalizer onto the GPU; torch_dtype='auto' keeps the
# checkpoint's native precision.
model = AutoModelForCausalLM.from_pretrained(
    'malaysia-ai/Malaysian-Normalizer-Qwen3-8B',
    torch_dtype='auto',
).cuda()
tokenizer = AutoTokenizer.from_pretrained('malaysia-ai/Malaysian-Normalizer-Qwen3-8B')

# The model was trained on this exact instruction format:
# "given the text / text: <input> / normalize to <language> language".
user = """
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language
"""
messages = [
    {'role': 'user', 'content': user.strip()},
]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

# Stream tokens to stdout as they are generated.
streamer = TextStreamer(tokenizer)
generate_kwargs = dict(
    **tokenizer(prompt, return_tensors='pt').to('cuda'),
    max_new_tokens=1024,
    top_p=0.9,
    top_k=50,
    temperature=0.9,
    do_sample=True,
    repetition_penalty=1.0,
    streamer=streamer,
)
generation_output = model.generate(**generate_kwargs)
# Decode the full sequence (prompt + completion) for programmatic use.
print(tokenizer.decode(generation_output[0], skip_special_tokens=True))
# NOTE(review): the scraped page repeated the identical example 28 times;
# collapsed here to a single clean copy.
from transformers import TextStreamer, AutoModelForCausalLM, AutoTokenizer

# Load the finetuned normalizer onto the GPU; torch_dtype='auto' keeps the
# checkpoint's native precision.
model = AutoModelForCausalLM.from_pretrained(
    'malaysia-ai/Malaysian-Normalizer-Qwen3-8B',
    torch_dtype='auto',
).cuda()
tokenizer = AutoTokenizer.from_pretrained('malaysia-ai/Malaysian-Normalizer-Qwen3-8B')

# The model was trained on this exact instruction format:
# "given the text / text: <input> / normalize to <language> language".
user = """
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language
"""
messages = [
    {'role': 'user', 'content': user.strip()},
]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

# Stream tokens to stdout as they are generated.
streamer = TextStreamer(tokenizer)
generate_kwargs = dict(
    **tokenizer(prompt, return_tensors='pt').to('cuda'),
    max_new_tokens=1024,
    top_p=0.9,
    top_k=50,
    temperature=0.9,
    do_sample=True,
    repetition_penalty=1.0,
    streamer=streamer,
)
generation_output = model.generate(**generate_kwargs)
# Decode the full sequence (prompt + completion) for programmatic use.
print(tokenizer.decode(generation_output[0], skip_special_tokens=True))

# Example rendered prompt and model response:
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>text
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>
<|im_start|>user
given the text
text: “Oleochemical exports dropped 2.72 per cent m-o-m to 210,924 tonnes from 216,816 tonnes while biodiesel exports fell 48.89 per cent m-o-m to 23,689 tonnes from 46,345 tonnes,” it said.
normalize to english language<|im_end|>
<|im_start|>assistant
<think>
</think>
{"normalized_text": "open quote Oleochemical exports dropped two point seven two per cent m dash o dash m to two hundred ten thousand nine hundred twenty four tonnes from two hundred sixteen thousand eight hundred sixteen tonnes while biodiesel exports fell forty eight point eight nine per cent m dash o dash m to twenty three thousand six hundred eighty nine tonnes from forty six thousand three hundred forty five tonnes, close quote it said.", "normalizer_mapping": {"\u201c": "open quote", "2.72": "two point seven two", "m-o-m": "m dash o dash m", "210,924": "two hundred ten thousand nine hundred twenty four", "216,816": "two hundred sixteen thousand eight hundred sixteen", "48.89": "forty eight point eight nine", "23,689": "twenty three thousand six hundred eighty nine", "46,345": "forty six thousand three hundred forty five", "\u201d": "close quote"}}<|im_end|>
Deploy This Model
Production-ready deployment in minutes
Together.ai
Instant API access to this model
Production-ready inference API. Start free, scale to millions.
Try Free API
Replicate
One-click model deployment
Run models in the cloud with simple API. No DevOps required.
Deploy Now
Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.