Datasets:
Tasks:
Text2Text Generation
Modalities:
Text
Formats:
json
Languages:
Indonesian
Size:
1M<n<10M
Tags:
code
License:
# Fine-tune ALBERT for sequence classification on a private Hugging Face dataset.
from transformers import AlbertTokenizer, AlbertForSequenceClassification
from datasets import load_dataset
from transformers import Trainer, TrainingArguments

# Load dataset (replace with your own dataset name and configuration/version).
dataset = load_dataset('your_username/your_dataset_name', 'your_dataset_version')

# Load tokenizer and model.
# NOTE(review): num_labels defaults to 2 (binary classification) — pass
# num_labels=... explicitly if the dataset has more than two classes.
tokenizer = AlbertTokenizer.from_pretrained('google/albert-base-v2')
model = AlbertForSequenceClassification.from_pretrained('google/albert-base-v2')

def preprocess_function(examples):
    """Tokenize a batch of examples.

    Assumes the dataset has a 'text' column — TODO confirm against the
    actual dataset schema. Truncates/pads every example to the model's
    max sequence length.
    """
    return tokenizer(examples['text'], truncation=True, padding='max_length')

# Preprocess dataset (batched=True tokenizes many rows per call, much faster).
encoded_dataset = dataset.map(preprocess_function, batched=True)

# Define training arguments: evaluate and checkpoint once per epoch.
training_args = TrainingArguments(
    output_dir='./results',
    per_device_train_batch_size=8,
    num_train_epochs=3,
    evaluation_strategy="epoch",
    save_strategy="epoch",
)

# Create trainer. Passing the tokenizer ensures it is saved alongside the
# model by save_model(), so the output directory is directly loadable.
# Assumes the dataset provides 'train' and 'validation' splits and a label
# column — TODO confirm.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=encoded_dataset['train'],
    eval_dataset=encoded_dataset['validation'],
    tokenizer=tokenizer,
)

# Train model.
trainer.train()

# Save trained model (and tokenizer) to a local directory.
trainer.save_model('./your_private_albert_model')