# Install required packages before running:
# pip install torch transformers datasets evaluate safetensors

import torch
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
import evaluate

# 1️⃣ Load Dataset
dataset = load_dataset("imdb")

# 2️⃣ Tokenizer
model_name = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)

def tokenize_fn(batch):
    return tokenizer(batch["text"], truncation=True, padding="max_length", max_length=256)

tokenized_datasets = dataset.map(tokenize_fn, batched=True)

# 3️⃣ Load Model
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# 4️⃣ Evaluation metric
accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = torch.argmax(torch.tensor(logits), dim=-1)
    return accuracy.compute(predictions=predictions, references=labels)

# 5️⃣ Training Arguments
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",  # renamed to eval_strategy in newer transformers releases
    save_strategy="epoch",
    logging_dir="./logs",
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=1,
    weight_decay=0.01,
    push_to_hub=False,
    save_safetensors=True,  # 🔹 Save model in safetensors format
)

# 6️⃣ Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"].shuffle(seed=42).select(range(2000)),  # small subset
    eval_dataset=tokenized_datasets["test"].shuffle(seed=42).select(range(500)),  # small subset
    tokenizer=tokenizer,  # newer transformers releases name this argument processing_class
    compute_metrics=compute_metrics,
)

# 7️⃣ Train Model
trainer.train()

# 8️⃣ Save final model in safetensors
trainer.save_model("./final_safetensors_model")  # saves as model.safetensors
tokenizer.save_pretrained("./final_safetensors_model")

print("✅ Training complete. Model saved in safetensors format at './final_safetensors_model'")
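
# 🔎 Optional sanity check (an illustrative sketch, not part of the training run above;
# the sample sentence is arbitrary): reload the fine-tuned model from the directory
# saved above and classify one example. IMDB labels are 0 = negative, 1 = positive.
reloaded_tokenizer = AutoTokenizer.from_pretrained("./final_safetensors_model")
reloaded_model = AutoModelForSequenceClassification.from_pretrained("./final_safetensors_model")
reloaded_model.eval()

sample = "A wonderful film with great performances."
inputs = reloaded_tokenizer(sample, return_tensors="pt", truncation=True, max_length=256)
with torch.no_grad():
    logits = reloaded_model(**inputs).logits
print("Predicted label:", torch.argmax(logits, dim=-1).item())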
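
# 🔎 A second optional sketch: the weights file can also be read directly with the
# safetensors library installed above; load_file returns a plain dict of tensors,
# which confirms the checkpoint really was written in safetensors format.
from safetensors.torch import load_file

state_dict = load_file("./final_safetensors_model/model.safetensors")
print(f"{len(state_dict)} tensors stored; first key: {next(iter(state_dict))}")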