pirate-qwen-14B

by winglian
License: Apache-2.0
14B params
Quick Summary

This model is a fine-tuned version of Qwen/Qwen3-14B on the winglian/pirate-ultrachat-10k dataset.
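
For quick testing, it can be queried like any other Qwen3 chat model. A minimal inference sketch with `transformers`, assuming merged weights are published under the (hypothetical) hub ID `winglian/pirate-qwen-14B`:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "winglian/pirate-qwen-14B"  # assumption: merged weights under this hub ID

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

# The tokenizer carries the qwen3 chat template used during fine-tuning.
messages = [{"role": "user", "content": "How do I read a file in Python?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(inputs, max_new_tokens=256)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```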

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 14GB+ RAM
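
To stay within the memory guidance above, the model can be loaded in 4-bit with bitsandbytes: 14B parameters take roughly 28GB in fp16, versus roughly 8-10GB at 4-bit. A minimal sketch, again assuming the hypothetical hub ID `winglian/pirate-qwen-14B`:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "winglian/pirate-qwen-14B"  # assumption: merged weights under this hub ID

# NF4 4-bit quantization with fp16 compute, mirroring the QLoRA training setup.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=bnb_config, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```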

Training Configuration

This model was trained with Axolotl using the following QLoRA configuration:

```yaml
adapter: qlora
base_model: Qwen/Qwen3-14B
bf16: false
chat_template: qwen3
dataloader_num_workers: 2
dataloader_pin_memory: true
dataloader_prefetch_factor: 8
datasets:
- eot_tokens:
  - <|im_end|>
  field_messages: conversations
  message_property_mappings:
    content: content
    role: role
  path: winglian/pirate-ultrachat-10k
  split: train
  type: chat_template
fp16: true
gradient_accumulation_steps: 1
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
learning_rate: 0.00019
load_in_4bit: true
logging_steps: 1
lora_alpha: 64
lora_mlp_kernel: true
lora_o_kernel: true
lora_qkv_kernel: true
lora_r: 32
lora_target_modules:
- q_proj
- k_proj
- v_proj
- o_proj
- gate_proj
- down_proj
- up_proj
lr_scheduler: cosine
max_grad_norm: 0.1
micro_batch_size: 1
num_epochs: 1
optimizer: paged_adamw_8bit
output_dir: ./outputs/qwen-sft-pirate-rrr
plugins:
- axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
sample_packing: true
saves_per_epoch: 2
sequence_len: 4096
warmup_steps: 5
xformers_attention: true
```
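
Because the run uses `adapter: qlora`, the training output is a LoRA adapter rather than a full checkpoint. If the published repo contains only the adapter, it can be attached to the base model with `peft`; a minimal sketch, assuming the adapter lives at the hypothetical hub ID `winglian/pirate-qwen-14B`:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen3-14B"
adapter_id = "winglian/pirate-qwen-14B"  # assumption: repo hosts the LoRA adapter

# Load the base model, then attach the fine-tuned adapter on top.
base = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, adapter_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Optionally fold the adapter into the base weights for faster inference.
model = model.merge_and_unload()
```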
