qwen3-4b-math-kd-jsd-temp1-v2

Language model · 4.0B parameters · License: apache-2.0 · by winglian

Quick Summary

This model is a fine-tuned version of Qwen/Qwen3-4B-Base on the winglian/OpenThoughts-114k-math-correct-qwen3-14b-math-prepared-temp1 dataset. It was trained with Axolotl's knowledge-distillation (KD) trainer on teacher outputs prepared offline from a Qwen3-14B math model, using a temperature of 1.0 and a generalized Jensen-Shannon divergence objective (see the configuration below).
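
For inference, a minimal sketch with Hugging Face transformers follows. The repo id is taken from the title of this card; adjust it if you load the weights from a local path.

```python
# Minimal inference sketch for this model; repo id taken from the card title.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "winglian/qwen3-4b-math-kd-jsd-temp1-v2"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
)

messages = [
    {"role": "user", "content": "What is the sum of the first 100 positive integers?"}
]
# The qwen3 chat template (used in training, see the config below) is applied here.
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=1024)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```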

Device Compatibility

Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum recommended: 4GB+ RAM
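
To fit the lower-RAM targets above, 4-bit quantized loading is one option. The sketch below uses bitsandbytes and assumes a CUDA-capable device; actual memory use also depends on context length and KV cache.

```python
# Sketch: 4-bit quantized loading to fit a ~4-6GB memory budget.
# Assumes bitsandbytes is installed and a CUDA device is available.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "winglian/qwen3-4b-math-kd-jsd-temp1-v2"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # 4-bit weights, bf16 compute
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)
```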

Code Examples

The Axolotl configuration used to train this model:

```yaml
base_model: Qwen/Qwen3-4B-Base
# base_model: winglian/qwen3-14b-math  # 14B teacher used to prepare the KD dataset

plugins:
  - axolotl.integrations.kd.KDPlugin
  - axolotl.integrations.liger.LigerPlugin

liger_rms_norm: true
liger_glu_activation: true

# torch_compile: true

strict: false

kd_trainer: true
kd_ce_alpha: 0.4          # weight on the hard-label cross-entropy term
kd_alpha: 1.0             # weight on the distillation term
kd_temperature: 1.0       # softmax temperature for teacher and student logits
kd_beta: 0.5              # KL interpolation; 0.5 gives a symmetric JSD objective
kd_normalize_topk: false  # whether to renormalize the stored top-k teacher probs

dataloader_prefetch_factor: 1
dataloader_num_workers: 2
dataloader_pin_memory: true

gc_steps: -1  # gc at the end of each epoch

chat_template: qwen3
datasets:
  - path: winglian/OpenThoughts-114k-math-correct-qwen3-14b-math-prepared-temp1
    type: chat_template
    split: train
    split_thinking: true
    eot_tokens:
      - "<|im_end|>"

skip_prepare_dataset: true
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out-kd-4b-offline-t1-v2

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

wandb_project: kd-4b-math
wandb_entity: axolotl-ai
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 1
micro_batch_size: 4
num_epochs: 2
optimizer: adamw_torch_fused
adam_beta2: 0.95
lr_scheduler: rex
learning_rate: 3e-5
max_grad_norm: 0.2
save_safetensors: true

bf16: true
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
logging_steps: 1
flash_attention: true

warmup_steps: 100
evals_per_epoch: 4
saves_per_epoch: 2
debug:
weight_decay: 0.0
special_tokens:
  eos_token: <|im_end|>
deepspeed: deepspeed_configs/zero2_torch_compile.json
```
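
For reference, the sketch below illustrates the objective the `kd_*` settings above describe: a weighted sum of hard-label cross-entropy and a beta-interpolated (generalized) Jensen-Shannon divergence between teacher and student token distributions. This is an illustration of the loss shape only, not the KD plugin's actual implementation; in the offline setup used here, the dataset stores top-k teacher log-probabilities rather than full distributions.

```python
import math

import torch
import torch.nn.functional as F


def generalized_jsd(student_logits, teacher_logits, temperature=1.0, beta=0.5):
    """Beta-interpolated Jensen-Shannon divergence (beta=0.5 is symmetric JSD).

    Requires 0 < beta < 1; a sketch over full distributions, whereas the
    offline KD dataset stores only top-k teacher log-probabilities.
    """
    s = F.log_softmax(student_logits / temperature, dim=-1)
    t = F.log_softmax(teacher_logits / temperature, dim=-1)
    # Log of the mixture M = beta * T + (1 - beta) * S, computed in log space.
    m = torch.logsumexp(
        torch.stack([t + math.log(beta), s + math.log(1.0 - beta)]), dim=0
    )
    # JSD_beta = beta * KL(T || M) + (1 - beta) * KL(S || M).
    kl_tm = F.kl_div(m, t, log_target=True, reduction="batchmean")
    kl_sm = F.kl_div(m, s, log_target=True, reduction="batchmean")
    return beta * kl_tm + (1.0 - beta) * kl_sm


def kd_loss(student_logits, teacher_logits, labels,
            kd_ce_alpha=0.4, kd_alpha=1.0, kd_temperature=1.0, kd_beta=0.5):
    """Combined objective mirroring the kd_* hyperparameters in the config."""
    ce = F.cross_entropy(
        student_logits.view(-1, student_logits.size(-1)),
        labels.view(-1),
        ignore_index=-100,  # masked (non-assistant) tokens
    )
    jsd = generalized_jsd(student_logits, teacher_logits, kd_temperature, kd_beta)
    return kd_ce_alpha * ce + kd_alpha * jsd
```

In practice the run itself is launched through the Axolotl CLI (for example `axolotl train config.yaml` in recent releases), with the YAML above as the config file.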
