Qwen-3-0.6B-GRPO-Vi-Medical-LoRA

by danhtran2mind · 0.6B params · 2 languages · 5 downloads · License: Other

Edge AI: runs on mobile, laptop, or server (2GB+ RAM recommended).

Quick Summary

This model is a fine-tuned version of unsloth/Qwen3-0.6B, adapted with a LoRA adapter trained via GRPO for Vietnamese medical question answering.

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 1GB+ RAM
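
These figures follow from simple parameter-count arithmetic: 0.6B parameters at 2 bytes each (FP16) come to roughly 1.2 GB for the weights alone, before KV cache and runtime overhead. A back-of-the-envelope sketch in plain Python (numbers illustrative):

# Back-of-the-envelope weight-memory estimate for a 0.6B-parameter model.
# Real usage is higher: KV cache, activations, and framework overhead add to this.
params = 0.6e9
for dtype, nbytes in [("fp32", 4), ("fp16", 2), ("int8", 1), ("int4", 0.5)]:
    print(f"{dtype}: ~{params * nbytes / 1e9:.2f} GB weights")
# fp16 -> ~1.20 GB, which explains the ~2GB floor and the 4-6GB mobile comfort zone.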

Code Examples

Inference Usage (Python, transformers)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
from peft import PeftModel

device = "cuda" if torch.cuda.is_available() else "cpu"

# Set random seed for reproducibility
seed = 42
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)

# Define model and LoRA adapter paths
base_model_name = "Qwen/Qwen3-0.6B"
lora_adapter_name = "danhtran2mind/Qwen-3-0.6B-GRPO-Vi-Medical-LoRA"  # this model's adapter repo; replace with a local path if needed

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Load base model with optimized settings
model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    torch_dtype=torch.float16,  # FP16 halves weight memory vs FP32
    device_map=device,
    trust_remote_code=True
)

# Apply LoRA adapter and switch to evaluation mode
model = PeftModel.from_pretrained(model, lora_adapter_name)
model.eval()

# System prompt (Vietnamese): "Answer in the following format:"
SYSTEM_PROMPT = """
Trả lời theo định dạng sau đây:
<reasoning>
...
</reasoning>
<answer>
...
</answer>
"""

# Build a chat-formatted prompt and generate with streamed output
messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    # Example question: "What is paracetamol used for?"
    {"role": "user", "content": "Thuốc paracetamol dùng để làm gì?"},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

streamer = TextStreamer(tokenizer, skip_prompt=True)
with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_new_tokens=512,
        temperature=0.7,
        do_sample=True,
        streamer=streamer,
    )

# Decode only the newly generated tokens
response = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
