QWEN2.5-7B-Bnk-3e

by FINGU-AI · Language Model · 7.0B parameters · License: MIT · 2 downloads · New (early-stage)
Edge AI: Mobile, Laptop, Server (16GB+ RAM)
Quick Summary

QWEN2.5-7B-Bnk-5e is a multilingual translation model based on the Qwen2.5 architecture with 7 billion parameters. It specializes in translating between multiple languages, including English, Korean, and Uzbek.

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 7GB+ RAM
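
To fit the laptop and mobile RAM budgets above, the model can be loaded in 4-bit precision. A minimal sketch, assuming the bitsandbytes backend is installed; actual memory use depends on hardware and context length:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "FINGU-AI/QWEN2.5-7B-Bnk-5e"

# 4-bit NF4 quantization shrinks a 7B model's weights to roughly 4-5GB
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",  # spread layers across available GPU/CPU memory
)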

Code Examples

How to Use (Python, transformers)

from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "FINGU-AI/QWEN2.5-7B-Bnk-5e"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Qwen2.5 is a decoder-only model, so it loads with AutoModelForCausalLM,
# not AutoModelForSeq2SeqLM
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",
)

# Example usage
source_text = "Hello, how are you?"
source_lang = "en"
target_lang = "ko"  # or "uz" for Uzbek

messages = [
    {"role": "system", "content": f"Translate {source_lang} to {target_lang} word by word correctly."},
    {"role": "user", "content": source_text},
]

# Apply the chat template and move the prompt to the model's device
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

# max_new_tokens bounds the generated continuation, not the whole sequence
outputs = model.generate(input_ids, max_new_tokens=100)
response = outputs[0][input_ids.shape[-1]:]  # keep only the newly generated tokens
translated_text = tokenizer.decode(response, skip_special_tokens=True)
print(translated_text)
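
For repeated translations, the steps above can be wrapped in a small helper. A sketch that reuses the tokenizer and model already loaded; translate is an illustrative name, not part of the model's API:

def translate(text, source_lang, target_lang, max_new_tokens=100):
    """Illustrative helper: builds the chat prompt used above and decodes only the new tokens."""
    messages = [
        {"role": "system", "content": f"Translate {source_lang} to {target_lang} word by word correctly."},
        {"role": "user", "content": text},
    ]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(input_ids, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)

print(translate("Hello, how are you?", "en", "ko"))  # English -> Korean
print(translate("Hello, how are you?", "en", "uz"))  # English -> Uzbek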

Deploy This Model

Production-ready deployment in minutes

Together.ai — Fastest API

Instant API access to this model: a production-ready inference API. Start free, scale to millions. (Try Free API)
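
If this model were hosted on Together.ai, calls would go through its OpenAI-compatible endpoint. A sketch under that assumption; the client usage is standard, but the model slug below is hypothetical and assumes the checkpoint is actually listed there:

from openai import OpenAI

# Together.ai exposes an OpenAI-compatible API; the model slug below is hypothetical
client = OpenAI(
    base_url="https://api.together.xyz/v1",
    api_key="YOUR_TOGETHER_API_KEY",
)
resp = client.chat.completions.create(
    model="FINGU-AI/QWEN2.5-7B-Bnk-5e",  # assumes this checkpoint is listed
    messages=[
        {"role": "system", "content": "Translate en to ko word by word correctly."},
        {"role": "user", "content": "Hello, how are you?"},
    ],
)
print(resp.choices[0].message.content)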

Replicate — Easiest Setup

One-click model deployment: run models in the cloud with a simple API, no DevOps required. (Deploy Now)
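
Likewise for Replicate: if someone published this checkpoint there, the Python client call would follow the usual pattern. The model identifier below is hypothetical:

import replicate

# Hypothetical identifier; Replicate models are addressed as "owner/name"
output = replicate.run(
    "fingu-ai/qwen2.5-7b-bnk-5e",
    input={"prompt": "Translate en to ko word by word correctly.\nHello, how are you?"},
)
print("".join(output))  # language-model outputs stream as chunks of text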

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.