Mistral-Nemo-Kurdish-Instruct

60
3
2 languages
BF16
license:apache-2.0
by
nazimali
Language Model
OTHER
New
60 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
Unknown
Mobile
Laptop
Server
Quick Summary

ئەمە مۆدێلێکی پارامێتری 12B یە، وردکراوە لەسەر نازیماڵی/میستراڵ-نیمۆ-کوردی بۆ یەک داتا سێتی ڕێنمایی کوردی (کرمانجی).

Code Examples

Example usage — Python (llama-cpp-python):
# Run the GGUF quant with llama-cpp-python: download the model file from the
# Hugging Face Hub, then request a single chat completion.
from llama_cpp import Llama

# Alpaca-style Kurmanji prompt template with TWO slots:
# first "{}" = instruction (Telîmat), second "{}" = user input (Têketin).
inference_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

llm = Llama.from_pretrained(
	repo_id="nazimali/Mistral-Nemo-Kurdish-Instruct",
	filename="Q4_K_M.gguf",
)

llm.create_chat_completion(
	messages=[
		{
			"role": "user",
			# Fix: the template has two "{}" placeholders, so .format() needs
			# two arguments — a single argument raised IndexError here.
			"content": inference_prompt.format(
				"tu arîkarek alîkar î",  # instruction: "you are a helpful assistant"
				"سڵاو ئەلیکوم، چۆنیت؟",  # user input
			),
		}
	]
)
Example usage — Python (llama-cpp-python):
# Run the GGUF quant with llama-cpp-python: download the model file from the
# Hugging Face Hub, then request a single chat completion.
from llama_cpp import Llama

# Alpaca-style Kurmanji prompt template with TWO slots:
# first "{}" = instruction (Telîmat), second "{}" = user input (Têketin).
inference_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

llm = Llama.from_pretrained(
	repo_id="nazimali/Mistral-Nemo-Kurdish-Instruct",
	filename="Q4_K_M.gguf",
)

llm.create_chat_completion(
	messages=[
		{
			"role": "user",
			# Fix: the template has two "{}" placeholders, so .format() needs
			# two arguments — a single argument raised IndexError here.
			"content": inference_prompt.format(
				"tu arîkarek alîkar î",  # instruction: "you are a helpful assistant"
				"سڵاو ئەلیکوم، چۆنیت؟",  # user input
			),
		}
	]
)
Example usage — Python (llama-cpp-python):
# Run the GGUF quant with llama-cpp-python: download the model file from the
# Hugging Face Hub, then request a single chat completion.
from llama_cpp import Llama

# Alpaca-style Kurmanji prompt template with TWO slots:
# first "{}" = instruction (Telîmat), second "{}" = user input (Têketin).
inference_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

llm = Llama.from_pretrained(
	repo_id="nazimali/Mistral-Nemo-Kurdish-Instruct",
	filename="Q4_K_M.gguf",
)

llm.create_chat_completion(
	messages=[
		{
			"role": "user",
			# Fix: the template has two "{}" placeholders, so .format() needs
			# two arguments — a single argument raised IndexError here.
			"content": inference_prompt.format(
				"tu arîkarek alîkar î",  # instruction: "you are a helpful assistant"
				"سڵاو ئەلیکوم، چۆنیت؟",  # user input
			),
		}
	]
)
Example usage — Python (llama-cpp-python):
# Run the GGUF quant with llama-cpp-python: download the model file from the
# Hugging Face Hub, then request a single chat completion.
from llama_cpp import Llama

# Alpaca-style Kurmanji prompt template with TWO slots:
# first "{}" = instruction (Telîmat), second "{}" = user input (Têketin).
inference_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

llm = Llama.from_pretrained(
	repo_id="nazimali/Mistral-Nemo-Kurdish-Instruct",
	filename="Q4_K_M.gguf",
)

llm.create_chat_completion(
	messages=[
		{
			"role": "user",
			# Fix: the template has two "{}" placeholders, so .format() needs
			# two arguments — a single argument raised IndexError here.
			"content": inference_prompt.format(
				"tu arîkarek alîkar î",  # instruction: "you are a helpful assistant"
				"سڵاو ئەلیکوم، چۆنیت؟",  # user input
			),
		}
	]
)
Example usage — Python (llama-cpp-python):
# Run the GGUF quant with llama-cpp-python: download the model file from the
# Hugging Face Hub, then request a single chat completion.
from llama_cpp import Llama

# Alpaca-style Kurmanji prompt template with TWO slots:
# first "{}" = instruction (Telîmat), second "{}" = user input (Têketin).
inference_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

llm = Llama.from_pretrained(
	repo_id="nazimali/Mistral-Nemo-Kurdish-Instruct",
	filename="Q4_K_M.gguf",
)

llm.create_chat_completion(
	messages=[
		{
			"role": "user",
			# Fix: the template has two "{}" placeholders, so .format() needs
			# two arguments — a single argument raised IndexError here.
			"content": inference_prompt.format(
				"tu arîkarek alîkar î",  # instruction: "you are a helpful assistant"
				"سڵاو ئەلیکوم، چۆنیت؟",  # user input
			),
		}
	]
)
Example usage — Python (llama-cpp-python):
# Run the GGUF quant with llama-cpp-python: download the model file from the
# Hugging Face Hub, then request a single chat completion.
from llama_cpp import Llama

# Alpaca-style Kurmanji prompt template with TWO slots:
# first "{}" = instruction (Telîmat), second "{}" = user input (Têketin).
inference_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

llm = Llama.from_pretrained(
	repo_id="nazimali/Mistral-Nemo-Kurdish-Instruct",
	filename="Q4_K_M.gguf",
)

llm.create_chat_completion(
	messages=[
		{
			"role": "user",
			# Fix: the template has two "{}" placeholders, so .format() needs
			# two arguments — a single argument raised IndexError here.
			"content": inference_prompt.format(
				"tu arîkarek alîkar î",  # instruction: "you are a helpful assistant"
				"سڵاو ئەلیکوم، چۆنیت؟",  # user input
			),
		}
	]
)
Example usage — Python (llama-cpp-python):
# Run the GGUF quant with llama-cpp-python: download the model file from the
# Hugging Face Hub, then request a single chat completion.
from llama_cpp import Llama

# Alpaca-style Kurmanji prompt template with TWO slots:
# first "{}" = instruction (Telîmat), second "{}" = user input (Têketin).
inference_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

llm = Llama.from_pretrained(
	repo_id="nazimali/Mistral-Nemo-Kurdish-Instruct",
	filename="Q4_K_M.gguf",
)

llm.create_chat_completion(
	messages=[
		{
			"role": "user",
			# Fix: the template has two "{}" placeholders, so .format() needs
			# two arguments — a single argument raised IndexError here.
			"content": inference_prompt.format(
				"tu arîkarek alîkar î",  # instruction: "you are a helpful assistant"
				"سڵاو ئەلیکوم، چۆنیت؟",  # user input
			),
		}
	]
)
Example usage — bash (llama.cpp CLI):
# llama.cpp CLI: fetches Q4_K_M.gguf from the Hugging Face repo, seeds the
# chat with a Kurmanji greeting, and stays in interactive conversation mode.
./llama-cli \
  --hf-repo "nazimali/Mistral-Nemo-Kurdish-Instruct" \
  --hf-file Q4_K_M.gguf \
  -p "selam alikum, tu çawa yî?" \
  --conversation
Example usage — bash (llama.cpp CLI):
# llama.cpp CLI: fetches Q4_K_M.gguf from the Hugging Face repo, seeds the
# chat with a Kurmanji greeting, and stays in interactive conversation mode.
./llama-cli \
  --hf-repo "nazimali/Mistral-Nemo-Kurdish-Instruct" \
  --hf-file Q4_K_M.gguf \
  -p "selam alikum, tu çawa yî?" \
  --conversation
Example usage — bash (llama.cpp CLI):
# llama.cpp CLI: fetches Q4_K_M.gguf from the Hugging Face repo, seeds the
# chat with a Kurmanji greeting, and stays in interactive conversation mode.
./llama-cli \
  --hf-repo "nazimali/Mistral-Nemo-Kurdish-Instruct" \
  --hf-file Q4_K_M.gguf \
  -p "selam alikum, tu çawa yî?" \
  --conversation
Example usage — bash (llama.cpp CLI):
# llama.cpp CLI: fetches Q4_K_M.gguf from the Hugging Face repo, seeds the
# chat with a Kurmanji greeting, and stays in interactive conversation mode.
./llama-cli \
  --hf-repo "nazimali/Mistral-Nemo-Kurdish-Instruct" \
  --hf-file Q4_K_M.gguf \
  -p "selam alikum, tu çawa yî?" \
  --conversation
Example usage — bash (llama.cpp CLI):
# llama.cpp CLI: fetches Q4_K_M.gguf from the Hugging Face repo, seeds the
# chat with a Kurmanji greeting, and stays in interactive conversation mode.
./llama-cli \
  --hf-repo "nazimali/Mistral-Nemo-Kurdish-Instruct" \
  --hf-file Q4_K_M.gguf \
  -p "selam alikum, tu çawa yî?" \
  --conversation
Example usage — bash (llama.cpp CLI):
# llama.cpp CLI: fetches Q4_K_M.gguf from the Hugging Face repo, seeds the
# chat with a Kurmanji greeting, and stays in interactive conversation mode.
./llama-cli \
  --hf-repo "nazimali/Mistral-Nemo-Kurdish-Instruct" \
  --hf-file Q4_K_M.gguf \
  -p "selam alikum, tu çawa yî?" \
  --conversation
Example usage — bash (llama.cpp CLI):
# llama.cpp CLI: fetches Q4_K_M.gguf from the Hugging Face repo, seeds the
# chat with a Kurmanji greeting, and stays in interactive conversation mode.
./llama-cli \
  --hf-repo "nazimali/Mistral-Nemo-Kurdish-Instruct" \
  --hf-file Q4_K_M.gguf \
  -p "selam alikum, tu çawa yî?" \
  --conversation
Example usage — Python (transformers, 4-bit quantized):
# Load the instruct model 4-bit-quantized via transformers + bitsandbytes,
# then generate a reply to a Kurdish greeting.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Alpaca-style Kurmanji prompt template: first "{}" = instruction (Telîmat),
# second "{}" = user input (Têketin); the model continues after "### Bersiv:".
infer_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

model_id = "nazimali/Mistral-Nemo-Kurdish-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit NF4 quantization with double quantization; matmuls computed in bf16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# device_map="auto" lets accelerate place the quantized weights automatically.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

model.eval()  # inference only: disables dropout and training-mode behavior


def call_llm(user_input, instructions=None):
    """Format the prompt, sample up to 120 new tokens, and return only the
    generated text (the echoed prompt and trailing </s> are stripped)."""
    # Default instruction is Kurmanji for "you are a helpful assistant".
    instructions = instructions or "tu arîkarek alîkar î"
    prompt = infer_prompt.format(instructions, user_input)

    # NOTE(review): inputs are moved to "cuda" unconditionally, while the
    # model uses device_map="auto" — assumes a single CUDA device; confirm.
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        add_special_tokens=False,
        return_token_type_ids=False,
    ).to("cuda")

    with torch.inference_mode():
        generated_ids = model.generate(
            **input_ids,
            max_new_tokens=120,
            do_sample=True,  # sampled decoding: output is non-deterministic
            temperature=0.7,
            top_p=0.7,
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # batch_decode keeps special tokens; [0] selects the single sequence.
    decoded_output = tokenizer.batch_decode(generated_ids)[0]

    # Strip the echoed prompt and the EOS marker, leaving only the answer.
    return decoded_output.replace(prompt, "").replace("</s>", "")

response = call_llm("سڵاو ئەلیکوم، چۆنیت؟")
print(response)
Example usage — Python (transformers, 4-bit quantized):
# Load the instruct model 4-bit-quantized via transformers + bitsandbytes,
# then generate a reply to a Kurdish greeting.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Alpaca-style Kurmanji prompt template: first "{}" = instruction (Telîmat),
# second "{}" = user input (Têketin); the model continues after "### Bersiv:".
infer_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

model_id = "nazimali/Mistral-Nemo-Kurdish-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit NF4 quantization with double quantization; matmuls computed in bf16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# device_map="auto" lets accelerate place the quantized weights automatically.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

model.eval()  # inference only: disables dropout and training-mode behavior


def call_llm(user_input, instructions=None):
    """Format the prompt, sample up to 120 new tokens, and return only the
    generated text (the echoed prompt and trailing </s> are stripped)."""
    # Default instruction is Kurmanji for "you are a helpful assistant".
    instructions = instructions or "tu arîkarek alîkar î"
    prompt = infer_prompt.format(instructions, user_input)

    # NOTE(review): inputs are moved to "cuda" unconditionally, while the
    # model uses device_map="auto" — assumes a single CUDA device; confirm.
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        add_special_tokens=False,
        return_token_type_ids=False,
    ).to("cuda")

    with torch.inference_mode():
        generated_ids = model.generate(
            **input_ids,
            max_new_tokens=120,
            do_sample=True,  # sampled decoding: output is non-deterministic
            temperature=0.7,
            top_p=0.7,
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # batch_decode keeps special tokens; [0] selects the single sequence.
    decoded_output = tokenizer.batch_decode(generated_ids)[0]

    # Strip the echoed prompt and the EOS marker, leaving only the answer.
    return decoded_output.replace(prompt, "").replace("</s>", "")

response = call_llm("سڵاو ئەلیکوم، چۆنیت؟")
print(response)
Example usage — Python (transformers, 4-bit quantized):
# Load the instruct model 4-bit-quantized via transformers + bitsandbytes,
# then generate a reply to a Kurdish greeting.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Alpaca-style Kurmanji prompt template: first "{}" = instruction (Telîmat),
# second "{}" = user input (Têketin); the model continues after "### Bersiv:".
infer_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

model_id = "nazimali/Mistral-Nemo-Kurdish-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit NF4 quantization with double quantization; matmuls computed in bf16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# device_map="auto" lets accelerate place the quantized weights automatically.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

model.eval()  # inference only: disables dropout and training-mode behavior


def call_llm(user_input, instructions=None):
    """Format the prompt, sample up to 120 new tokens, and return only the
    generated text (the echoed prompt and trailing </s> are stripped)."""
    # Default instruction is Kurmanji for "you are a helpful assistant".
    instructions = instructions or "tu arîkarek alîkar î"
    prompt = infer_prompt.format(instructions, user_input)

    # NOTE(review): inputs are moved to "cuda" unconditionally, while the
    # model uses device_map="auto" — assumes a single CUDA device; confirm.
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        add_special_tokens=False,
        return_token_type_ids=False,
    ).to("cuda")

    with torch.inference_mode():
        generated_ids = model.generate(
            **input_ids,
            max_new_tokens=120,
            do_sample=True,  # sampled decoding: output is non-deterministic
            temperature=0.7,
            top_p=0.7,
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # batch_decode keeps special tokens; [0] selects the single sequence.
    decoded_output = tokenizer.batch_decode(generated_ids)[0]

    # Strip the echoed prompt and the EOS marker, leaving only the answer.
    return decoded_output.replace(prompt, "").replace("</s>", "")

response = call_llm("سڵاو ئەلیکوم، چۆنیت؟")
print(response)
Example usage — Python (transformers, 4-bit quantized):
# Load the instruct model 4-bit-quantized via transformers + bitsandbytes,
# then generate a reply to a Kurdish greeting.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Alpaca-style Kurmanji prompt template: first "{}" = instruction (Telîmat),
# second "{}" = user input (Têketin); the model continues after "### Bersiv:".
infer_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

model_id = "nazimali/Mistral-Nemo-Kurdish-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit NF4 quantization with double quantization; matmuls computed in bf16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# device_map="auto" lets accelerate place the quantized weights automatically.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

model.eval()  # inference only: disables dropout and training-mode behavior


def call_llm(user_input, instructions=None):
    """Format the prompt, sample up to 120 new tokens, and return only the
    generated text (the echoed prompt and trailing </s> are stripped)."""
    # Default instruction is Kurmanji for "you are a helpful assistant".
    instructions = instructions or "tu arîkarek alîkar î"
    prompt = infer_prompt.format(instructions, user_input)

    # NOTE(review): inputs are moved to "cuda" unconditionally, while the
    # model uses device_map="auto" — assumes a single CUDA device; confirm.
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        add_special_tokens=False,
        return_token_type_ids=False,
    ).to("cuda")

    with torch.inference_mode():
        generated_ids = model.generate(
            **input_ids,
            max_new_tokens=120,
            do_sample=True,  # sampled decoding: output is non-deterministic
            temperature=0.7,
            top_p=0.7,
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # batch_decode keeps special tokens; [0] selects the single sequence.
    decoded_output = tokenizer.batch_decode(generated_ids)[0]

    # Strip the echoed prompt and the EOS marker, leaving only the answer.
    return decoded_output.replace(prompt, "").replace("</s>", "")

response = call_llm("سڵاو ئەلیکوم، چۆنیت؟")
print(response)
Example usage — Python (transformers, 4-bit quantized):
# Load the instruct model 4-bit-quantized via transformers + bitsandbytes,
# then generate a reply to a Kurdish greeting.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Alpaca-style Kurmanji prompt template: first "{}" = instruction (Telîmat),
# second "{}" = user input (Têketin); the model continues after "### Bersiv:".
infer_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

model_id = "nazimali/Mistral-Nemo-Kurdish-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit NF4 quantization with double quantization; matmuls computed in bf16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# device_map="auto" lets accelerate place the quantized weights automatically.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

model.eval()  # inference only: disables dropout and training-mode behavior


def call_llm(user_input, instructions=None):
    """Format the prompt, sample up to 120 new tokens, and return only the
    generated text (the echoed prompt and trailing </s> are stripped)."""
    # Default instruction is Kurmanji for "you are a helpful assistant".
    instructions = instructions or "tu arîkarek alîkar î"
    prompt = infer_prompt.format(instructions, user_input)

    # NOTE(review): inputs are moved to "cuda" unconditionally, while the
    # model uses device_map="auto" — assumes a single CUDA device; confirm.
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        add_special_tokens=False,
        return_token_type_ids=False,
    ).to("cuda")

    with torch.inference_mode():
        generated_ids = model.generate(
            **input_ids,
            max_new_tokens=120,
            do_sample=True,  # sampled decoding: output is non-deterministic
            temperature=0.7,
            top_p=0.7,
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # batch_decode keeps special tokens; [0] selects the single sequence.
    decoded_output = tokenizer.batch_decode(generated_ids)[0]

    # Strip the echoed prompt and the EOS marker, leaving only the answer.
    return decoded_output.replace(prompt, "").replace("</s>", "")

response = call_llm("سڵاو ئەلیکوم، چۆنیت؟")
print(response)
Example usage — Python (transformers, 4-bit quantized):
# Load the instruct model 4-bit-quantized via transformers + bitsandbytes,
# then generate a reply to a Kurdish greeting.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Alpaca-style Kurmanji prompt template: first "{}" = instruction (Telîmat),
# second "{}" = user input (Têketin); the model continues after "### Bersiv:".
infer_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

model_id = "nazimali/Mistral-Nemo-Kurdish-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit NF4 quantization with double quantization; matmuls computed in bf16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# device_map="auto" lets accelerate place the quantized weights automatically.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

model.eval()  # inference only: disables dropout and training-mode behavior


def call_llm(user_input, instructions=None):
    """Format the prompt, sample up to 120 new tokens, and return only the
    generated text (the echoed prompt and trailing </s> are stripped)."""
    # Default instruction is Kurmanji for "you are a helpful assistant".
    instructions = instructions or "tu arîkarek alîkar î"
    prompt = infer_prompt.format(instructions, user_input)

    # NOTE(review): inputs are moved to "cuda" unconditionally, while the
    # model uses device_map="auto" — assumes a single CUDA device; confirm.
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        add_special_tokens=False,
        return_token_type_ids=False,
    ).to("cuda")

    with torch.inference_mode():
        generated_ids = model.generate(
            **input_ids,
            max_new_tokens=120,
            do_sample=True,  # sampled decoding: output is non-deterministic
            temperature=0.7,
            top_p=0.7,
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # batch_decode keeps special tokens; [0] selects the single sequence.
    decoded_output = tokenizer.batch_decode(generated_ids)[0]

    # Strip the echoed prompt and the EOS marker, leaving only the answer.
    return decoded_output.replace(prompt, "").replace("</s>", "")

response = call_llm("سڵاو ئەلیکوم، چۆنیت؟")
print(response)
Example usage — Python (transformers, 4-bit quantized):
# Load the instruct model 4-bit-quantized via transformers + bitsandbytes,
# then generate a reply to a Kurdish greeting.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Alpaca-style Kurmanji prompt template: first "{}" = instruction (Telîmat),
# second "{}" = user input (Têketin); the model continues after "### Bersiv:".
infer_prompt = """Li jêr rêwerzek heye ku peywirek rave dike, bi têketinek ku çarçoveyek din peyda dike ve tê hev kirin. Bersivek ku daxwazê ​​bi guncan temam dike binivîsin.
### Telîmat:
{}
### Têketin:
{}
### Bersiv:
"""

model_id = "nazimali/Mistral-Nemo-Kurdish-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit NF4 quantization with double quantization; matmuls computed in bf16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# device_map="auto" lets accelerate place the quantized weights automatically.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

model.eval()  # inference only: disables dropout and training-mode behavior


def call_llm(user_input, instructions=None):
    """Format the prompt, sample up to 120 new tokens, and return only the
    generated text (the echoed prompt and trailing </s> are stripped)."""
    # Default instruction is Kurmanji for "you are a helpful assistant".
    instructions = instructions or "tu arîkarek alîkar î"
    prompt = infer_prompt.format(instructions, user_input)

    # NOTE(review): inputs are moved to "cuda" unconditionally, while the
    # model uses device_map="auto" — assumes a single CUDA device; confirm.
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        add_special_tokens=False,
        return_token_type_ids=False,
    ).to("cuda")

    with torch.inference_mode():
        generated_ids = model.generate(
            **input_ids,
            max_new_tokens=120,
            do_sample=True,  # sampled decoding: output is non-deterministic
            temperature=0.7,
            top_p=0.7,
            num_return_sequences=1,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # batch_decode keeps special tokens; [0] selects the single sequence.
    decoded_output = tokenizer.batch_decode(generated_ids)[0]

    # Strip the echoed prompt and the EOS marker, leaving only the answer.
    return decoded_output.replace(prompt, "").replace("</s>", "")

response = call_llm("سڵاو ئەلیکوم، چۆنیت؟")
print(response)

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.