# Quiet-Star-Custom / train-h100-sharegpt-sft.py
import torch
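# Enable TF32 matmuls (fast on Ampere/Hopper GPUs such as the H100, at slightly reduced precision).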
torch.backends.cuda.matmul.allow_tf32 = True
import random
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TextGenerationPipeline,
    AutoConfig,
    BitsAndBytesConfig,
    TrainingArguments,
)
from datasets import load_dataset
from trl import SFTTrainer
from peft import LoraConfig
from torch.nn import CrossEntropyLoss
import time
import gc
random_seed = 42
torch.manual_seed(random_seed)
random.seed(random_seed)
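# Small supervised fine-tuning subset: 1,500 examples from the orca-math word-problem dataset.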
dataset = load_dataset("HuggingFaceH4/orca-math-word-problems-200k", split="train_sft").select(range(1500))
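# Quiet-STaR rollout settings: n_ahead is the number of hidden "thought" tokens generated
# before each prediction, n_ahead_talk the number of "talk" tokens used for the output loss,
# and n_passes the number of parallel thought rollouts per position.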
n_ahead_talk_global = 1
n_passes_global = 1
n_ahead_global = 8
n_examples = 0
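# Builds and configures the Quiet-Star-Custom model. `params` is either None (use the
# globals above) or a hyperparameter-search trial-like object exposing a `.params` dict.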
def model_init(params):
    original = False
    if params is None:
        params = {}
    else:
        params = params.params
    # Read the rollout/thought settings from params, falling back to the globals above.
    n_ahead = params.get("n_ahead", n_ahead_global if not original else 1)
    n_ahead_talk = params.get("n_ahead_talk", n_ahead_talk_global if not original else 1)
    n_passes = params.get("n_passes", n_passes_global if not original else 1)
    gumbel_temperature = params.get("gumbel_temperature", 1)
    use_start_thought_token = params.get("use_start_thought_token", True)
    use_end_thought_token = params.get("use_end_thought_token", True)
    include_policy_loss = params.get("include_policy_loss", True)
    gumbel_detach = params.get("gumbel_detach", True)
    merged_talk_heads = params.get("merged_talk_heads", True)
    residual_think_head = params.get("residual_think_head", False)
    optimize_lm_head_only_at_start = params.get("optimize_lm_head_only_at_start", False)

    model_id = "Crystalcareai/Quiet-Star-Custom"
    tokenizer_id = model_id

    print("Loading model")
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        max_thoughts=n_ahead + n_ahead_talk + 1,
        merged_talk_heads=merged_talk_heads,
        merged_lm_and_talk_heads=False,
        merged_lm_and_think_heads=True,
        use_concat_talk_head=True,
        use_shallow_think=True,
        use_shallow_talk=False,
        use_complex_think_head=False,
        use_complex_talk_head=True,
        use_weighted_talk_head=True,
        trust_remote_code=True,
        device_map="auto",
    )
print("Loaded model")
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, truncation=True, padding_side="right")
tokenizer.pad_token_id = tokenizer.eos_token_id
special_tokens_to_add = []
if model.use_start_thought_token:
special_tokens_to_add.append("<|startthought|>")
if model.use_end_thought_token:
special_tokens_to_add.append("<|endthought|>")
if special_tokens_to_add:
tokenizer.add_special_tokens({"additional_special_tokens": special_tokens_to_add})
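    # Assumption: the Quiet-Star-Custom checkpoint already reserves embeddings for the
    # thought tokens. If it did not, the embedding matrix would need resizing here,
    # e.g. model.resize_token_embeddings(len(tokenizer)).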
    model.tokenizer = tokenizer

    # Debug: print embedding modules so their sizes can be checked against the tokenizer.
    for name, module in model.named_modules():
        if "embed" in name:
            print(module, flush=True)
    # Copy the run configuration onto the model instance.
    model.gumbel_detach = gumbel_detach
    model.include_policy_loss = include_policy_loss
    model.use_end_thought_token = use_end_thought_token
    model.use_start_thought_token = use_start_thought_token
    model.n_ahead = n_ahead
    model.n_ahead_talk = n_ahead_talk
    model.n_passes = n_passes
    model.residual_think_head = residual_think_head
    model.optimize_lm_head_only_at_start = optimize_lm_head_only_at_start
    model.gumbel_temperature = gumbel_temperature
    model.original_mode = original
    model.config_params = params
    model.run_start = int(time.time())
    model.train()
    return model
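# Hypothetical usage sketch: model_init also accepts a hyperparameter-search trial-like
# object exposing a `.params` dict (see the unwrapping at the top of the function).
# Kept commented out so the model is only loaded once, further below.
#
# class _Trial:
#     params = {"n_ahead": 8, "n_ahead_talk": 1, "n_passes": 1}
#
# model = model_init(_Trial())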
def clear_gpu_cache():
    torch.cuda.empty_cache()
    gc.collect()
class CustomSFTTrainer(SFTTrainer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cache_clear_step = 6  # Clear the CUDA cache every 6 steps

    def on_step_end(self, args, state, control, **kwargs):
        if state.global_step % self.cache_clear_step == 0:
            clear_gpu_cache()
        return super().on_step_end(args, state, control, **kwargs)
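# Note: `on_step_end` is normally a TrainerCallback hook, so the stock Trainer loop does
# not invoke the method above on the trainer itself. A callback-based sketch (assuming
# the same cache-clearing goal) would look like:
#
# from transformers import TrainerCallback
#
# class ClearCacheCallback(TrainerCallback):
#     def on_step_end(self, args, state, control, **kwargs):
#         if state.global_step % 6 == 0:
#             clear_gpu_cache()
#         return control
#
# and would be registered via CustomSFTTrainer(..., callbacks=[ClearCacheCallback()]).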
max_seq_length = 8092
run_id = int(time.time())
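# Training configuration: GaLore AdamW restricted to MLP modules, bf16 + tf32, and an
# effective batch size of 16 (per-device batch 1 x 16 gradient-accumulation steps).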
training_args = TrainingArguments(
    output_dir="./out",
    num_train_epochs=3,
    per_device_train_batch_size=1,
    gradient_checkpointing=False,
    gradient_accumulation_steps=16,
    optim="galore_adamw",
    optim_target_modules=[r".*mlp.*"],
    # optim="adamw_torch_fused",
    logging_steps=1,
    save_strategy="steps",
    save_steps=1000,
    max_steps=-1,
    bf16=True,
    tf32=True,
    learning_rate=2e-10,
    max_grad_norm=1.0,
    warmup_steps=20,
    lr_scheduler_type="constant",
    push_to_hub=False,
    report_to="wandb",
)
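# Note: optim="galore_adamw" needs the galore-torch package and a sufficiently recent
# transformers release (GaLore support landed around v4.39); otherwise Trainer raises
# an import error at startup.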
# peft_config = LoraConfig(
# r = 8, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
# target_modules =["q_proj", "v_proj"],
# lora_alpha = 32,
# lora_dropout = 0, # Supports any, but = 0 is optimized
# bias = "none",
# use_dora=True,
# )
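# Anomaly detection makes autograd track forward ops so it can point at the op that
# produced NaN/Inf gradients; useful for debugging, but it slows training noticeably.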
torch.autograd.set_detect_anomaly(True)
# Select the device (informational only; device_map="auto" already placed the model).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model_init(None) # Initialize the model
tokenizer = model.tokenizer
trainer = CustomSFTTrainer(
    args=training_args,
    train_dataset=dataset,
    model=model,
    tokenizer=tokenizer,
    max_seq_length=max_seq_length,
    # peft_config=peft_config,
)
trainer.train()
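# Optional follow-up (not part of the original run): persist the final weights and tokenizer.
# trainer.save_model("./out/final")
# tokenizer.save_pretrained("./out/final")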