Qwen2.5-Instruct-7B-COIG-P

by m-a-p · Language Model · 7B params · license: cc-by-nc-4.0 · 79 downloads · New (early-stage)
Edge AI: Mobile / Laptop / Server · 16 GB+ RAM
Quick Summary

This repository contains Qwen2.5-Instruct-7B-COIG-P, a 7B-parameter instruction-tuned language model released by m-a-p under the CC-BY-NC-4.0 license. The name indicates a Qwen2.5-7B-Instruct base aligned with the COIG-P Chinese preference dataset.

Device Compatibility

Mobile: 4-6 GB RAM
Laptop: 16 GB RAM
Server: GPU
Minimum recommended: 7 GB+ RAM
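
The mobile and laptop tiers above generally require quantization: the full bf16 checkpoint needs roughly 15 GB, while 4-bit weights fit in about 5 GB. A minimal loading sketch, assuming a CUDA-capable machine with the bitsandbytes package installed (this recipe is ours, not from the model card):

# Sketch: load the model in 4-bit to fit the lower RAM tiers above.
# Assumes a CUDA GPU and `pip install bitsandbytes`.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # store weights in 4-bit, compute in bf16
)

model = AutoModelForCausalLM.from_pretrained(
    "m-a-p/Qwen2.5-Instruct-7B-COIG-P",
    quantization_config=quant_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("m-a-p/Qwen2.5-Instruct-7B-COIG-P")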

Code Examples

How to Get Started with the Model (Python, transformers)
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

device = "cuda"  # or "cpu" if you don't have a GPU

model = AutoModelForCausalLM.from_pretrained(
    "m-a-p/Qwen2.5-Instruct-7B-COIG-P",
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("m-a-p/Qwen2.5-Instruct-7B-COIG-P")

prompt = "给我一个大型语言模型的简短介绍。"  # Give me a short introduction to large language model.
messages = [
    {"role": "system", "content": "你是一个乐于助人的助手。"}, # You are a helpful assistant.
    {"role": "user", "content": prompt}
]
# Render the chat messages into a single prompt string using the model's chat template.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(device)

# Generate up to 512 new tokens, then strip the prompt tokens from each
# sequence so only the newly generated reply is decoded.
generated_ids = model.generate(
    model_inputs.input_ids,
    max_new_tokens=512
)
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
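
For interactive use, you can stream the reply token by token instead of waiting for the full generation. A small variant of the example above using transformers' TextStreamer, reusing the model, tokenizer, and model_inputs already defined:

# Stream tokens to stdout as they are generated, reusing the objects
# created in the example above.
from transformers import TextStreamer

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(
    model_inputs.input_ids,
    max_new_tokens=512,
    streamer=streamer,  # prints each decoded chunk as soon as it is ready
)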

Deploy This Model

Production-ready deployment in minutes

Together.ai (Fastest API): instant API access to this model. Production-ready inference API; start free, scale to millions.
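
Together.ai exposes an OpenAI-compatible endpoint, so a hosted copy of the model can be called with the standard openai client. A minimal sketch; the model ID below simply mirrors the repository name and is an assumption, so check the provider's catalog for the actual identifier before use:

from openai import OpenAI

# Together's OpenAI-compatible endpoint; any compatible host works the same way.
client = OpenAI(
    base_url="https://api.together.xyz/v1",
    api_key="YOUR_API_KEY",
)

response = client.chat.completions.create(
    model="m-a-p/Qwen2.5-Instruct-7B-COIG-P",  # assumed ID, verify in the provider catalog
    messages=[
        {"role": "system", "content": "你是一个乐于助人的助手。"},  # You are a helpful assistant.
        {"role": "user", "content": "给我一个大型语言模型的简短介绍。"},  # Brief intro to LLMs, please.
    ],
    max_tokens=512,
)
print(response.choices[0].message.content)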

Replicate (Easiest Setup): one-click model deployment. Run models in the cloud with a simple API; no DevOps required.

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.