Asif Ahmad committed
Commit 11764e7 · 1 Parent(s): 86b340d

Create LSTMbase_mining_model.py

Files changed (1)
  1. LSTMbase_mining_model.py  +140  -0
LSTMbase_mining_model.py ADDED
@@ -0,0 +1,140 @@
import numpy as np
import tensorflow
from numpy import ndarray


class BaseMiningModel:
    def __init__(self, features):
        # Default architecture: a single LSTM stack of 50 units with no dropout.
        # Each entry in self.neurons is [units, dropout_rate].
        self.neurons = [[50, 0]]
        self.features = features
        self.loaded_model = None
        self.window_size = 100
        self.model_dir = None
        self.batch_size = 16
        self.learning_rate = 0.01

    def set_neurons(self, neurons):
        self.neurons = neurons
        return self

    def set_window_size(self, window_size):
        self.window_size = window_size
        return self

    def set_model_dir(self, model, stream_id=None):
        if model is None and stream_id is not None:
            self.model_dir = f'mining_models/{stream_id}.keras'
        elif model is not None:
            self.model_dir = model
        else:
            raise Exception("stream_id is not provided to define model")
        return self

    def set_batch_size(self, batch_size):
        self.batch_size = batch_size
        return self

    def set_learning_rate(self, learning_rate):
        self.learning_rate = learning_rate
        return self

    def load_model(self):
        self.loaded_model = tensorflow.keras.models.load_model(self.model_dir)
        return self

    def train(self, data: ndarray, epochs: int = 100):
        # Resume from a previously saved model if one exists; otherwise build a new one.
        try:
            model = tensorflow.keras.models.load_model(self.model_dir)
        except OSError:
            model = None

        # Each training target sits this many steps beyond the end of its input window.
        output_sequence_length = 100

        if model is None:
            model = tensorflow.keras.models.Sequential()

            if len(self.neurons) > 1:
                # Stacked architecture: a bidirectional LSTM first, then dropout/LSTM pairs.
                model.add(tensorflow.keras.layers.Bidirectional(
                    tensorflow.keras.layers.LSTM(self.neurons[0][0],
                                                 input_shape=(self.window_size, self.features),
                                                 return_sequences=True)))
                for ind, stack in enumerate(self.neurons[1:]):
                    # Only the last LSTM layer collapses the sequence to a single vector.
                    return_sequences = True
                    if ind + 1 == len(self.neurons) - 1:
                        return_sequences = False
                    model.add(tensorflow.keras.layers.Dropout(stack[1]))
                    model.add(tensorflow.keras.layers.LSTM(stack[0], return_sequences=return_sequences))
            else:
                model.add(tensorflow.keras.layers.LSTM(self.neurons[0][0],
                                                       input_shape=(self.window_size, self.features)))

            model.add(tensorflow.keras.layers.Dense(1))

            optimizer = tensorflow.keras.optimizers.Adam(learning_rate=self.learning_rate)
            model.compile(optimizer=optimizer, loss='mean_squared_error')

        X_train, Y_train = [], []

        # The first feature column is the prediction target.
        X_train_data = data
        Y_train_data = data.T[0].T

        # Each target is the single value output_sequence_length steps past its input window.
        for i in range(len(Y_train_data) - output_sequence_length - self.window_size):
            target_sequence = Y_train_data[i + self.window_size + output_sequence_length:
                                           i + self.window_size + output_sequence_length + 1]
            Y_train.append(target_sequence)

        # Each input is a window of window_size consecutive rows of all features.
        for i in range(len(X_train_data) - output_sequence_length - self.window_size):
            input_sequence = X_train_data[i:i + self.window_size]
            X_train.append(input_sequence)

        X_train = np.array(X_train, dtype=np.float32)
        Y_train = np.array(Y_train, dtype=np.float32)

        # Chronological 90/10 split into training and validation sets.
        split_index = int(len(X_train) * 0.9)
        X_train, X_val = X_train[:split_index], X_train[split_index:]
        Y_train, Y_val = Y_train[:split_index], Y_train[split_index:]

        X_train = tensorflow.convert_to_tensor(X_train)
        Y_train = tensorflow.convert_to_tensor(Y_train)
        X_val = tensorflow.convert_to_tensor(X_val)
        Y_val = tensorflow.convert_to_tensor(Y_val)

        # Stop when validation loss plateaus and keep only the best checkpoint on disk.
        early_stopping = tensorflow.keras.callbacks.EarlyStopping(monitor="val_loss",
                                                                  patience=10,
                                                                  restore_best_weights=True)
        model_checkpoint = tensorflow.keras.callbacks.ModelCheckpoint(filepath=self.model_dir,
                                                                      save_best_only=True,
                                                                      monitor='val_loss',
                                                                      mode='min',
                                                                      save_freq='epoch')

        model.fit(X_train, Y_train,
                  validation_data=(X_val, Y_val),
                  epochs=epochs,
                  batch_size=self.batch_size,
                  callbacks=[early_stopping, model_checkpoint])

    def predict(self, data: ndarray):
        predictions = []

        # Forecast from the most recent window of observations.
        window_data = data[-self.window_size:]
        window_data = window_data.reshape(1, self.window_size, self.features)

        predicted_value = self.loaded_model.predict(window_data)
        predictions.append(predicted_value)
        return predictions

    @staticmethod
    def base_model_dataset(samples):
        min_cutoff = 0

        # Rows 1-4 of the raw samples are close, high, low and volume;
        # row 0 (presumably timestamps) is dropped.
        cutoff_close = samples.tolist()[1][min_cutoff:]
        cutoff_high = samples.tolist()[2][min_cutoff:]
        cutoff_low = samples.tolist()[3][min_cutoff:]
        cutoff_volume = samples.tolist()[4][min_cutoff:]

        # Transpose to (samples, features) with close as the first (target) column.
        return np.array([cutoff_close,
                         cutoff_high,
                         cutoff_low,
                         cutoff_volume]).T
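
Taken together, the class is configured through chained setters, trained on a (samples, features) array whose first column is the target series, and then reloaded from its checkpoint for inference; each input window of window_size rows is paired with the close value output_sequence_length steps past the window, so the network learns a fixed-horizon forecast rather than a next-step one. A minimal usage sketch, assuming a hypothetical candles array and stream id (neither appears in this commit, and a mining_models/ directory is assumed to exist for the checkpoint):

# Usage sketch only: `candles` and the stream id are illustrative placeholders.
import numpy as np

# Raw samples laid out as rows: [timestamp, close, high, low, volume].
candles = np.random.rand(5, 1_000)

# (1000, 4) array with close as the first (target) column.
training_data = BaseMiningModel.base_model_dataset(candles)

model = (BaseMiningModel(features=training_data.shape[1])
         .set_window_size(100)
         .set_neurons([[100, 0], [50, 0.2]])
         .set_batch_size(32)
         .set_learning_rate(0.001)
         .set_model_dir(None, stream_id="example_stream"))  # assumes mining_models/ exists

model.train(training_data, epochs=25)

# Reload the best checkpoint and forecast from the most recent window.
prediction = model.load_model().predict(training_data)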