L3.3-GeneticLemonade-Final-v2-70B

47
9
llama
by
zerofata
Language Model
OTHER
70B params
New
47 downloads
Early-stage
Edge AI:
Mobile
Laptop
Server
157GB+ RAM
Mobile
Laptop
Server
Quick Summary

body { font-family: sans-serif; color: #f0f0f0; line-height: 1.6; }

Device Compatibility

Mobile
4-6GB RAM
Laptop
16GB RAM
Server
GPU
Minimum Recommended
66GB+ RAM

Code Examples

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved padding token; quoted so YAML keeps the <|...|> text verbatim.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    # Render prompts with the tokenizer's built-in chat template (see chat_template above).
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Held-out set used to compute eval_loss. Note: "split: train" here names the
# split *inside* validate_dataset.jsonl (a single-split file), not the training data.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Explicit null (was a bare empty value, which is ambiguous YAML):
# tokenized data is re-prepared each run rather than cached to disk.
dataset_prepared_path: null
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true  # NF4 4-bit base weights; adapters train in higher precision
lora_r: 64
lora_alpha: 128  # alpha = 2 * r, the common scaling convention
lora_dropout: 0.1
lora_target_linear: true  # attach adapters to all linear projections
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch per device = 4 * 2 = 8
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit  # paged 8-bit AdamW keeps optimizer state off-GPU when needed
lr_scheduler: rex  # reflected-exponential decay schedule
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true  # pack multiple short samples per sequence for throughput
eval_sample_packing: false  # keep eval samples unpacked so eval_loss is comparable
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true  # trade compute for memory on the 70B base

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): transformers >= 4.46 renamed this key to `eval_strategy`;
# confirm the axolotl/transformers versions in use accept the legacy spelling.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5  # must align with eval_steps for load_best_model_at_end
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false  # lower eval_loss is better
early_stopping_patience: 5  # stop after 5 evals without improvement

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
```

```yaml
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

dataset_prepared_path:
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name
====================yaml
---
# ====================
# MODEL CONFIGURATION
# ====================
base_model: zerofata/L3.3-GeneticLemonade-Unleashed-70B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
special_tokens:
  # Llama 3.x reserved right-padding token; avoids overloading eos as pad.
  pad_token: "<|finetune_right_pad_id|>"
chat_template: llama3

# ====================
# DATASET CONFIGURATION
# ====================
datasets:
  - path: ./dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]

# Separate held-out eval set. Axolotl allows either test_datasets or
# val_set_size, not both — pin val_set_size to 0 explicitly.
test_datasets:
  - path: ./validate_dataset.jsonl
    type: chat_template
    split: train
    chat_template_strategy: tokenizer
    field_messages: messages
    message_property_mappings:
      role: role
      content: content
    roles:
      user: ["user"]
      assistant: ["assistant"]
      system: ["system"]
val_set_size: 0

dataset_prepared_path: null  # explicit null — a bare "key:" is an ambiguous empty value
train_on_inputs: false  # Only train on assistant responses

# ====================
# QLORA CONFIGURATION
# ====================
adapter: qlora
load_in_4bit: true
lora_r: 64
lora_alpha: 128  # alpha = 2 * r
lora_dropout: 0.1
lora_target_linear: true
# lora_modules_to_save:  # Uncomment only if you added NEW tokens

# ====================
# TRAINING PARAMETERS
# ====================
num_epochs: 2
micro_batch_size: 4
gradient_accumulation_steps: 2  # effective batch = 4 * 2 per device
learning_rate: 1.5e-5
optimizer: paged_adamw_8bit
lr_scheduler: rex
warmup_ratio: 0.05
weight_decay: 0.01
max_grad_norm: 1.0

# ====================
# SEQUENCE & PACKING
# ====================
sequence_len: 8192
sample_packing: true
eval_sample_packing: false
pad_to_sequence_len: true

# ====================
# HARDWARE OPTIMIZATIONS
# ====================
bf16: auto
flash_attention: true
gradient_checkpointing: true

# ====================
# EVALUATION & CHECKPOINTING
# ====================
# NOTE(review): recent transformers renamed this key to eval_strategy;
# confirm against the installed axolotl/transformers versions.
evaluation_strategy: steps
eval_steps: 5
save_strategy: steps  # must match eval cadence for load_best_model_at_end
save_steps: 5
save_total_limit: 5  # Keep best + last few checkpoints
load_best_model_at_end: true
metric_for_best_model: eval_loss
greater_is_better: false
early_stopping_patience: 5

# ====================
# LOGGING & OUTPUT
# ====================
output_dir: ./output_model
logging_steps: 2
save_safetensors: true

# ====================
# WANDB TRACKING
# ====================
wandb_project: project_name
# wandb_entity: your_entity
# wandb_name: your_run_name

Deploy This Model

Production-ready deployment in minutes

Together.ai

Instant API access to this model

Fastest API

Production-ready inference API. Start free, scale to millions.

Try Free API

Replicate

One-click model deployment

Easiest Setup

Run models in the cloud with simple API. No DevOps required.

Deploy Now

Disclosure: We may earn a commission from these partners. This helps keep LLMYourWay free.