Upload model
- config.json +16 -0
- configuration_arabichar.py +18 -0
- modeling_arabichar.py +63 -0
- pytorch_model.bin +3 -0
config.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "architectures": [
+    "ArabiCharModelForImageClassification"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_arabichar.ArabiCharModelConfig",
+    "AutoModelForImageClassification": "modeling_arabichar.ArabiCharModelForImageClassification"
+  },
+  "conv1_channels": 32,
+  "conv2_channels": 64,
+  "dropout_prob": 0.4,
+  "fc1_units": 128,
+  "num_classes": 28,
+  "torch_dtype": "float32",
+  "transformers_version": "4.32.1"
+}
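The auto_map block above is what lets the Transformers Auto classes resolve the custom code shipped in this repository: AutoConfig is routed to ArabiCharModelConfig and AutoModelForImageClassification to ArabiCharModelForImageClassification, which requires passing trust_remote_code=True when loading. A minimal loading sketch, with "user/arabichar" standing in as a placeholder for the actual Hub repository id:

from transformers import AutoConfig, AutoModelForImageClassification

repo_id = "user/arabichar"  # placeholder; substitute the real repository id
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForImageClassification.from_pretrained(repo_id, trust_remote_code=True)
print(config.num_classes)  # 28 character classes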
configuration_arabichar.py
ADDED
@@ -0,0 +1,18 @@
+from transformers import PretrainedConfig
+
+class ArabiCharModelConfig(PretrainedConfig):
+    def __init__(
+        self,
+        num_classes=28,
+        conv1_channels=32,
+        conv2_channels=64,
+        fc1_units=128,
+        dropout_prob=0.4,
+        **kwargs
+    ):
+        super().__init__(**kwargs)
+        self.num_classes = num_classes
+        self.conv1_channels = conv1_channels
+        self.conv2_channels = conv2_channels
+        self.fc1_units = fc1_units
+        self.dropout_prob = dropout_prob
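Because ArabiCharModelConfig subclasses PretrainedConfig, it can also be built and saved directly; the keyword defaults above reproduce the values serialized in config.json. A small sketch, assuming the file is importable as a plain top-level module (outside a package, the relative import used by modeling_arabichar.py does not apply here):

from configuration_arabichar import ArabiCharModelConfig

# Defaults mirror config.json; any field can be overridden per keyword.
config = ArabiCharModelConfig(dropout_prob=0.5)
print(config.num_classes, config.conv2_channels, config.dropout_prob)  # 28 64 0.5
config.save_pretrained("arabichar-config")  # writes a config.json into that directory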
modeling_arabichar.py
ADDED
@@ -0,0 +1,63 @@
+from typing import Any, Mapping
+from .configuration_arabichar import ArabiCharModelConfig
+from transformers import PreTrainedModel
+import torch
+import torch.nn as nn
+
+class ArabiCharModel(nn.Module):
+    def __init__(self, config):
+        super(ArabiCharModel, self).__init__()
+
+        self.conv1 = nn.Conv2d(1, config.conv1_channels, kernel_size=5, padding=4)
+        self.conv2 = nn.Conv2d(config.conv1_channels, config.conv1_channels, kernel_size=5)
+        self.conv3 = nn.Conv2d(config.conv1_channels, config.conv1_channels, kernel_size=5)
+        self.pool1 = nn.MaxPool2d(2)
+        self.bn1 = nn.BatchNorm2d(config.conv1_channels)
+
+        self.conv4 = nn.Conv2d(config.conv1_channels, config.conv2_channels, kernel_size=5, padding=4)
+        self.conv5 = nn.Conv2d(config.conv2_channels, config.conv2_channels, kernel_size=5)
+        self.conv6 = nn.Conv2d(config.conv2_channels, config.conv2_channels, kernel_size=5)
+        self.pool2 = nn.MaxPool2d(2)
+        self.bn2 = nn.BatchNorm2d(config.conv2_channels)
+
+        self.fc1 = nn.Linear(config.conv2_channels * 5 * 5, config.fc1_units)
+        self.fc2 = nn.Linear(config.fc1_units, config.fc1_units)
+        self.dropout = nn.Dropout(config.dropout_prob)
+        self.fc3 = nn.Linear(config.fc1_units, config.num_classes)
+
+    def forward(self, x):
+        x = torch.relu(self.conv1(x))
+        x = torch.relu(self.conv2(x))
+        x = torch.relu(self.conv3(x))
+        x = self.pool1(x)
+        x = self.bn1(x)
+
+        x = torch.relu(self.conv4(x))
+        x = torch.relu(self.conv5(x))
+        x = torch.relu(self.conv6(x))
+        x = self.pool2(x)
+        x = self.bn2(x)
+
+        x = x.view(x.size(0), -1)
+        x = torch.relu(self.fc1(x))
+        x = torch.relu(self.fc2(x))
+        x = self.dropout(x)
+        return torch.softmax(self.fc3(x), dim=1)
+
+class ArabiCharModelForImageClassification(PreTrainedModel):
+    config_class = ArabiCharModelConfig
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = ArabiCharModel(config)
+
+    def forward(self, tensor, labels=None):
+        logits = self.model(tensor)
+        if labels is not None:
+            loss = torch.nn.functional.cross_entropy(logits, labels)
+            return {"loss": loss, "logits": logits}
+
+        return {"logits": logits}
+
+    def load_state_dict(self, model_name):
+        self.model.load_state_dict(torch.load(model_name))
+
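Working backwards from the conv/pool stack and the conv2_channels * 5 * 5 flatten, the network appears to expect single-channel 32x32 inputs (spatial size 32 -> 36 -> 32 -> 28 -> 14 through the first block, then 18 -> 14 -> 10 -> 5 through the second). A smoke-test sketch under that assumption, instantiating the classes locally; because modeling_arabichar.py uses a relative import, the two files are assumed here to sit inside a package (a placeholder name "arabichar" is used), or alternatively they can be loaded through the Hub's trust_remote_code machinery as shown earlier:

import torch
# "arabichar" is a placeholder package directory containing the two files above.
from arabichar.configuration_arabichar import ArabiCharModelConfig
from arabichar.modeling_arabichar import ArabiCharModelForImageClassification

config = ArabiCharModelConfig()
model = ArabiCharModelForImageClassification(config)
model.eval()

dummy = torch.randn(1, 1, 32, 32)   # one grayscale 32x32 image, batch dimension first
with torch.no_grad():
    out = model(dummy)
print(out["logits"].shape)  # torch.Size([1, 28]) -- softmax scores, one per class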
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14729bd70797b50e42399038f0782bae18f0b8fbe732660df74036e1e7cab4cc
+size 2143045