Austral-Qwen3-235B
by Aurore-Reveil
235B params · Other · New · 4 downloads · Early-stage
Edge AI: 526GB+ RAM required; server-class only (not feasible on mobile or laptop)
Quick Summary
An SFT on top of the largest Qwen, which nobody seems to have done yet, trained on a collection of the usual Austral datasets (books, RP logs, light novels, etc.).
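For orientation, a minimal inference sketch using Hugging Face transformers. The repository id is an assumption inferred from the model name and author (verify before use), and running it requires server-class hardware; see Device Compatibility below:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: repo id inferred from the card, not confirmed by it.
repo = "Aurore-Reveil/Austral-Qwen3-235B"

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # matches the bf16 dtype in the training config below
    device_map="auto",           # shard the 235B weights across available GPUs
)

messages = [{"role": "user", "content": "Write the opening scene of a frontier novel."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))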
Device Compatibility
Mobile: 4-6GB RAM
Laptop: 16GB RAM
Server: GPU
Minimum Recommended: 219GB+ RAM
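The figures above can be sanity-checked with simple arithmetic: weight memory is roughly parameter count times bytes per parameter, before KV cache and runtime overhead. A back-of-the-envelope sketch (the quantization widths are illustrative assumptions, not formats the card ships):

# Naive weight-memory estimates for a 235B-parameter model.
# Real usage adds KV cache, activations, and runtime overhead, which is
# why the card's figures (219GB+ minimum, 526GB+ full) sit above these.
PARAMS = 235e9

for name, bytes_per_param in [("bf16", 2.0), ("int8", 1.0), ("int4", 0.5)]:
    gb = PARAMS * bytes_per_param / 1024**3
    print(f"{name}: ~{gb:.0f} GB for weights alone")
# bf16: ~438 GB, int8: ~219 GB, int4: ~109 GB

The ~219 GB int8 estimate lands exactly on the card's minimum recommendation, suggesting that figure assumes 8-bit weights.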
Code Examples
Torchtune config (YAML)
output_dir: ./qwen3_235B_A22B_austral/full

tokenizer:
  _component_: torchtune.models.qwen3.qwen3_tokenizer
  path: ./Qwen3-235B-A22B-tt/vocab.json
  merges_file: ./Qwen3-235B-A22B-tt/merges.txt
  max_seq_len: 32768

dataset:
  _component_: torchtune.datasets.pretokenized_dataset
  source: IntervitensInc/test_235B_2-pack
  split: train
  packed: true
seed: 42
shuffle: false

model:
  _component_: torchtune.models.qwen3.qwen3_moe_235b_a22b

checkpointer:
  _component_: torchtune.training.FullModelTorchTuneCheckpointer
  checkpoint_dir: ./Qwen3-235B-A22B-tt
  checkpoint_files:
    - model-00001-of-00001.bin
  recipe_checkpoint: null
  output_dir: ${output_dir}
  model_type: QWEN3_MOE
resume_from_checkpoint: false
enable_async_checkpointing: false

batch_size: 1
epochs: 4

optimizer:
  _component_: torchao.optim.AdamW8bit
  lr: 3.0e-06
lr_scheduler:
  _component_: torchtune.training.lr_schedulers.get_rex_scheduler
  num_warmup_steps: 100
loss:
  _component_: torchtune.modules.loss.LinearCrossEntropyLoss
max_steps_per_epoch: null
gradient_accumulation_steps: 1
clip_grad_norm: null

compile:
  model: true
  loss: true
  scale_grads: true
  optimizer_step: false
optimizer_in_bwd: true

device: cuda
enable_activation_checkpointing: true
enable_activation_offloading: true
custom_sharded_layers:
  - tok_embeddings
  - output
fsdp_cpu_offload: false
dtype: bf16

metric_logger:
  _component_: torchtune.training.metric_logging.WandBLogger
  project: qwen3-235-a22b-austral
log_every_n_steps: 1
log_peak_memory_stats: true
log_level: INFO
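A config like this is launched through torchtune's CLI. A minimal sketch, assuming the standard distributed full-finetune recipe and a single 8-GPU node (neither detail is stated on the card; the config filename is a placeholder for wherever the YAML above is saved):

tune run --nproc_per_node 8 full_finetune_distributed --config qwen3_235b_austral.yaml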