nemo-kimi-lora

by ToastyPigeon · license: apache-2.0
Base model: mistralai/Mistral-Nemo-Base-2407 (12B params)
Quick Summary

An early-stage QLoRA adapter for mistralai/Mistral-Nemo-Base-2407, trained with Axolotl on two completion-format writing datasets (Alfitaria/synthkink-combined-completions and Alfitaria/bodinforg-completions).
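To try the adapter, load the base model in 4-bit and attach the LoRA weights with PEFT. This is a minimal sketch, assuming transformers, peft, and bitsandbytes are installed and that the adapter is published under the hub_model_id from the training config below (ToastyPigeon/nemo-kink-lora):

```python
# Minimal sketch: load Mistral-Nemo-Base-2407 in 4-bit and attach the LoRA adapter.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "mistralai/Mistral-Nemo-Base-2407"
adapter_id = "ToastyPigeon/nemo-kink-lora"  # hub_model_id from the config below

bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, quantization_config=bnb, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_id)

# The adapter was trained on raw completions (type: completion), so prompt
# with plain text rather than a chat template.
prompt = "The lighthouse keeper had one rule:"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```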

Code Examples

Model Configuration

```yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
```
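
To reproduce the run, save the YAML above (for example as nemo-lora.yaml) and launch it through Axolotl, typically with `accelerate launch -m axolotl.cli.train nemo-lora.yaml`. The effective batch size works out to micro_batch_size × gradient_accumulation_steps = 4 sequences per device, and hub_strategy: "checkpoint" pushes the latest checkpoint to the hub repo so an interrupted run can be resumed.

For inference without PEFT, the adapter can be folded back into the base weights. A minimal sketch, assuming the adapter files sit in ./nemo-writer-ckpts (the output_dir above) and using ./nemo-kimi-merged as a hypothetical destination:

```python
# Minimal sketch: merge the LoRA adapter into bf16 base weights for standalone use.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "mistralai/Mistral-Nemo-Base-2407"
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(model, "./nemo-writer-ckpts")  # adapter from output_dir
merged = model.merge_and_unload()  # bake the LoRA deltas into the base weights

merged.save_pretrained("./nemo-kimi-merged", safe_serialization=True)
AutoTokenizer.from_pretrained(base_id).save_pretrained("./nemo-kimi-merged")
```

Merging a QLoRA-trained adapter into unquantized bf16 weights is a common shortcut; expect small numerical differences versus the quantized setup used during training.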
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
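  # full sharding with CPU parameter offload and activation checkpointing
  # trades step speed for memory headroom; each MistralDecoderLayer becomes
  # its own FSDP unit
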
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1
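# a checkpoint lands every ~10% of the epoch, but only the newest is kept on disk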

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
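
Both datasets are trained as plain-text completions, so the adapter is prompted with raw text rather than a chat template. Below is a minimal inference sketch, assuming `transformers`, `peft`, and `bitsandbytes` are installed and that the adapter lives at the `hub_model_id` from the config (the published repo name may differ):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "mistralai/Mistral-Nemo-Base-2407"
adapter_id = "ToastyPigeon/nemo-kink-lora"  # hub_model_id above; may differ from the final repo name

# Loading the base in 4-bit mirrors the QLoRA training setup (load_in_4bit: true).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(model, adapter_id)

# Completion-style prompting: give it an opening to continue.
prompt = "The old lighthouse keeper"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

If a standalone model is preferred, the adapter can instead be applied to the unquantized bf16 base and folded in with PEFT's `merge_and_unload()`.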
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD

# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
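
For reference, here is a minimal inference sketch (not part of the original config): it loads the base model in 4-bit, matching the QLoRA training setup above, and attaches the adapter. The repo ids are taken from `base_model` and `hub_model_id` in the config; adjust them if the adapter lives elsewhere, and note that `transformers`, `peft`, and `bitsandbytes` are assumed to be installed.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "mistralai/Mistral-Nemo-Base-2407"   # base_model from the config
adapter_id = "ToastyPigeon/nemo-kink-lora"     # hub_model_id from the config

# Load the base in 4-bit, as during QLoRA training.
bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, quantization_config=bnb, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_id)

# The adapter was trained on completion-format data, so prompt with plain prose.
prompt = "The rain had been falling for three days when"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

Because the run used raw completions rather than a chat template, treating the model as a text continuer (rather than an instruct model) should give results closest to the training distribution.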
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
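# note: FULL_SHARD with fsdp_offload_params trades step speed for a much smaller per-GPU memory footprint
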
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
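
To use the resulting adapter for inference, the sketch below loads the base model in 4-bit (mirroring `load_in_4bit: true` above) and attaches the trained LoRA via PEFT. This is a minimal illustration, not part of the original recipe: the adapter id is taken from `hub_model_id` above, and the prompt is a placeholder. The config itself is run with Axolotl, e.g. `axolotl train config.yml` (or `accelerate launch -m axolotl.cli.train config.yml` on older versions).

```python
# Minimal inference sketch -- NOT part of the training recipe above.
# Assumes transformers, peft, bitsandbytes, and accelerate are installed,
# and that the adapter was pushed to the hub_model_id from this config.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "mistralai/Mistral-Nemo-Base-2407"  # base_model above
adapter_id = "ToastyPigeon/nemo-kink-lora"    # hub_model_id above

# Mirror load_in_4bit: true from the config at inference time.
bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, quantization_config=bnb, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_id)

# The datasets are completion-type, so prompt with raw text to continue.
prompt = "The rain had been falling for three days when"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.8)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```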
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
=== Model Configuration ===yaml
# === Model Configuration ===
base_model: mistralai/Mistral-Nemo-Base-2407
load_in_8bit: false
load_in_4bit: true

# === HF Configuration === 
hub_model_id: ToastyPigeon/nemo-kink-lora
hub_strategy: "checkpoint"

# === Training Setup ===
num_epochs: 1
micro_batch_size: 2
gradient_accumulation_steps: 2
sequence_len: 8192
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
pad_to_sequence_len: true
#max_steps: 10
# === Evaluation ===
val_set_size: 0.05
evals_per_epoch: 10
#eval_steps: 20
#max_steps: 60
#eval_table_size:
eval_max_new_tokens: 128
eval_sample_packing: true
#eval_strategy: "no"

# === LoRA Configuration ===
adapter: qlora
lora_model_dir:
lora_r: 128
lora_alpha: 16
lora_dropout: 0.1
lora_target_linear: true
lora_fan_in_fan_out:
lora_target_modules:
peft_use_rslora: true
lora_modules_to_save:
#  - embed_tokens
#  - lm_head
#fix_untrained_tokens: true
#lora_mlp_kernel: true
#lora_qkv_kernel: true
#lora_o_kernel: true

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
warmup_steps: 0
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args: 
#optim_target_modules: all_linear
learning_rate: 1e-5
lr_scheduler: rex
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025


# === Data Configuration ===
#chat_template: jinja
#chat_template_jinja: "{%- set default_system_message = \"You are Mistral Small 3, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris. You obediently fulfill the user's requests.\" %}\n\n{{- bos_token }}\n\n{%- if messages[0]['role'] == 'system' %}\n    {%- if messages[0]['content'] is string %}\n        {%- set system_message = messages[0]['content'] %}\n    {%- else %}\n        {%- set system_message = messages[0]['content'][0]['text'] %}\n    {%- endif %}\n    {%- set loop_messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = default_system_message %}\n    {%- set loop_messages = messages %}\n{%- endif %}\n{{- '[SYSTEM_PROMPT]' + system_message + '[/SYSTEM_PROMPT]' }}\n\n{%- for message in loop_messages %}\n    {%- if message['role'] == 'user' %}\n        {%- if message['content'] is string %}\n            {{- '[INST]' + message['content'] + '[/INST]' }}\n        {%- else %}\n            {{- '[INST]' }}\n            {%- for bl (line truncated to 1000 characters)
#chat_template: chatml
special_tokens:
  pad_token: "<pad>"

#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: Alfitaria/synthkink-combined-completions
    type: completion
  - path: Alfitaria/bodinforg-completions
    type: completion

dataset_prepared_path: last_run_prepared


# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: offload
#gradient_checkpointing_kwargs:
#  use_reentrant: false
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: /workspace/axolotl/deepspeed_configs/zero2.json

# === FSDP Config === 
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: MistralDecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD
# === Wandb Tracking ===
wandb_project: Nemo
# wandb_entity: [WANDB_ENTITY]
# wandb_name: [WANDB_RUN_NAME]

# === Checkpointing ===
saves_per_epoch: 10
save_total_limit: 1

# === Advanced Settings ===
output_dir: /workspace/aibox-standalone-pool/axolotl/nemo-writer-ckpts
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 69
