Sadjad Alikhani committed on
Commit 6647b6a · verified · 1 Parent(s): 9ad0918

Upload model.py

Files changed (1)
  1. model.py +197 -0
model.py ADDED
@@ -0,0 +1,197 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 15 19:55:23 2024

@author: salikha4
"""

import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from huggingface_hub import hf_hub_download

from inference import *
from load_data import load_DeepMIMO_data
from input_preprocess import *
from lwm_model import load_model  # LWM itself is defined below, so importing it here would only be shadowed

ELEMENT_LENGTH = 16
D_MODEL = 64
MAX_LEN = 129
N_LAYERS = 12
N_HEADS = 12
D_FF = D_MODEL * 4
# Note: D_MODEL (64) is not evenly divisible by N_HEADS (12), so each head
# gets floor(64 / 12) = 5 dimensions and the concatenated heads span only 60
# of the 64 model dimensions; the attention output projection maps back to D_MODEL.
D_K = D_MODEL // N_HEADS
D_V = D_MODEL // N_HEADS
DROPOUT = 0.1

class LayerNormalization(nn.Module):
    def __init__(self, d_model: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.eps = eps
        self.alpha = nn.Parameter(torch.ones(d_model))
        self.bias = nn.Parameter(torch.zeros(d_model))

    def forward(self, x):
        mean = x.mean(dim=-1, keepdim=True)
        std = x.std(dim=-1, keepdim=True)
        return self.alpha * (x - mean) / (std + self.eps) + self.bias

class Embedding(nn.Module):
    def __init__(self, element_length, d_model, max_len):
        super().__init__()
        self.element_length = element_length
        self.d_model = d_model
        self.proj = nn.Linear(element_length, d_model)
        self.pos_embed = nn.Embedding(max_len, d_model)
        self.norm = LayerNormalization(d_model)

    def forward(self, x):
        # x: (batch, seq_len, element_length). Project each element to d_model
        # and add a learned positional embedding before normalizing.
        seq_len = x.size(1)
        pos = torch.arange(seq_len, dtype=torch.long, device=x.device)
        pos = pos.unsqueeze(0).expand_as(x[:, :, 0])
        tok_emb = self.proj(x.float())
        embedding = tok_emb + self.pos_embed(pos)
        return self.norm(embedding)

class ScaledDotProductAttention(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, Q, K, V):
        scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(D_K)
        attn = F.softmax(scores, dim=-1)
        context = torch.matmul(attn, V)
        return context, attn

class MultiHeadAttention(nn.Module):
    def __init__(self):
        super().__init__()
        self.W_Q = nn.Linear(D_MODEL, D_K * N_HEADS)
        self.W_K = nn.Linear(D_MODEL, D_K * N_HEADS)
        self.W_V = nn.Linear(D_MODEL, D_V * N_HEADS)
        self.linear = nn.Linear(N_HEADS * D_V, D_MODEL)
        self.norm = LayerNormalization(D_MODEL)
        self.dropout = nn.Dropout(DROPOUT)

    def forward(self, Q, K, V):
        # Split the projections into heads: (batch, n_heads, seq_len, d_k).
        residual, batch_size = Q, Q.size(0)
        q_s = self.W_Q(Q).view(batch_size, -1, N_HEADS, D_K).transpose(1, 2)
        k_s = self.W_K(K).view(batch_size, -1, N_HEADS, D_K).transpose(1, 2)
        v_s = self.W_V(V).view(batch_size, -1, N_HEADS, D_V).transpose(1, 2)

        # Attend per head, then concatenate heads and project back to D_MODEL.
        context, attn = ScaledDotProductAttention()(q_s, k_s, v_s)
        output = context.transpose(1, 2).contiguous().view(batch_size, -1, N_HEADS * D_V)
        output = self.linear(output)
        return residual + self.dropout(output), attn

class PoswiseFeedForwardNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(D_MODEL, D_FF)
        self.fc2 = nn.Linear(D_FF, D_MODEL)
        self.dropout = nn.Dropout(DROPOUT)
        self.norm = LayerNormalization(D_MODEL)

    def forward(self, x):
        output = self.fc2(self.dropout(F.relu(self.fc1(x))))
        return x + self.dropout(output)

class EncoderLayer(nn.Module):
    def __init__(self):
        super().__init__()
        self.enc_self_attn = MultiHeadAttention()
        self.pos_ffn = PoswiseFeedForwardNet()
        self.norm = LayerNormalization(D_MODEL)

    def forward(self, enc_inputs):
        attn_outputs, attn = self.enc_self_attn(enc_inputs, enc_inputs, enc_inputs)
        attn_outputs = self.norm(attn_outputs)
        enc_outputs = self.pos_ffn(attn_outputs)
        return enc_outputs, attn

# An earlier draft of LWM (removed here as a near-duplicate of the class
# below) loaded weights with torch.hub.load_state_dict_from_url from
# https://huggingface.co/sadjadalikhani/LWM/resolve/main/; the version below
# downloads the checkpoint via huggingface_hub.hf_hub_download instead.

class LWM(torch.nn.Module):
    def __init__(self, element_length=16, d_model=64, max_len=129, n_layers=12):
        super().__init__()
        self.embedding = Embedding(element_length, d_model, max_len)
        self.layers = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])
        self.linear = nn.Linear(d_model, d_model)
        self.norm = LayerNormalization(d_model)

        # Tie the decoder to the input projection: proj.weight has shape
        # (d_model, element_length), so the transposed weights map hidden
        # states back to element space.
        embed_weight = self.embedding.proj.weight
        d_model, n_dim = embed_weight.size()
        self.decoder = nn.Linear(d_model, n_dim, bias=False)
        self.decoder.weight = nn.Parameter(embed_weight.transpose(0, 1))
        self.decoder_bias = nn.Parameter(torch.zeros(n_dim))

    @classmethod
    def from_pretrained(cls, ckpt_name='model_weights.pth', device='cuda', use_auth_token=None):
        model = cls().to(device)

        # Download the checkpoint from the Hugging Face Hub, then load it.
        ckpt_path = hf_hub_download(repo_id="sadjadalikhani/LWM", filename=ckpt_name, use_auth_token=use_auth_token)
        model.load_state_dict(torch.load(ckpt_path, map_location=device))
        print(f"Model loaded successfully from {ckpt_path} to {device}")

        return model

    def forward(self, input_ids, masked_pos):
        output = self.embedding(input_ids)
        for layer in self.layers:
            output, _ = layer(output)

        # Gather hidden states at the masked positions and decode them back
        # to element space for the masked-modeling objective.
        masked_pos = masked_pos.long()[:, :, None].expand(-1, -1, output.size(-1))
        h_masked = torch.gather(output, 1, masked_pos)
        h_masked = self.norm(F.relu(self.linear(h_masked)))
        logits_lm = self.decoder(h_masked) + self.decoder_bias
        return logits_lm, output
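
# Usage sketch (illustrative, not part of the original commit): assumes
# model_weights.pth is present in the sadjadalikhani/LWM repo and that inputs
# follow the shapes implied above, i.e. input_ids of shape
# (batch, seq_len, ELEMENT_LENGTH) and masked_pos holding positions to decode.
if __name__ == "__main__":
    model = LWM.from_pretrained(device='cpu')
    input_ids = torch.randn(2, MAX_LEN, ELEMENT_LENGTH)
    masked_pos = torch.randint(0, MAX_LEN, (2, 8))
    logits_lm, output = model(input_ids, masked_pos)
    print(logits_lm.shape, output.shape)  # (2, 8, 16) and (2, 129, 64)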