You can load the laff model from the Hugging Face Hub as follows:
```python
import torch
from transformers import BertTokenizer, BertForSequenceClassification
from huggingface_hub import hf_hub_download

from base_models import BinaryClassificationBertModel  # provided in this repository

# Download the fine-tuned checkpoint from the Hub
file_path = hf_hub_download(repo_id="mandeep-rathee/laff-model", filename="bert-base-laff.pth")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Build the BERT backbone with a single-logit head, then load the fine-tuned weights
base_model_name = "bert-base-uncased"
tokenizer = BertTokenizer.from_pretrained(base_model_name)
base_model = BertForSequenceClassification.from_pretrained(base_model_name, num_labels=1, torch_dtype=torch.float16)
model = BinaryClassificationBertModel(base_model)
model.load_state_dict(torch.load(file_path, map_location=device))
model.to(device)
model.eval()
```
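Once the checkpoint is loaded you can score an input, for example a query–passage pair if you use the model as a cross-encoder. The snippet below is only a minimal sketch: the example strings are made up, and it assumes that `BinaryClassificationBertModel.forward` accepts `input_ids` and `attention_mask` and returns one score per example; check `base_models.py` in the repository for the exact signature.

```python
# Minimal inference sketch (forward signature assumed; see base_models.py in the repo)
query = "what causes rainbows"  # illustrative query
passage = "Rainbows form when sunlight is refracted and dispersed by water droplets."  # illustrative passage

inputs = tokenizer(query, passage, truncation=True, padding=True, return_tensors="pt").to(device)
with torch.no_grad():
    score = model(inputs["input_ids"], attention_mask=inputs["attention_mask"])
print(score)
```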
Base model: google-bert/bert-base-uncased