forked from SayHiRay/malware-detection
-
Notifications
You must be signed in to change notification settings - Fork 0
/
train.py
162 lines (128 loc) · 6.25 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Conv1D, MaxPooling1D, GlobalAveragePooling1D, Dropout, Embedding, AlphaDropout
from keras.callbacks import Callback, ModelCheckpoint
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import roc_auc_score
def read_and_preprocess_training_data(file_name_train, file_name_label):
"""
Read in training features and labels, and preprocess them.
"""
train = pd.read_csv(file_name_train, header=None, usecols=list(range(4096)), dtype=np.float16)
X_raw = train.values
input_dim_X = (~np.isnan(X_raw)).sum(
1) # Used later to sort the training samples, so that all samples in one batch have similar dimensions
train.fillna(0, inplace=True)
train = train.astype(np.int16)
labels = pd.read_csv(file_name_label, dtype={'sample_id': np.int32, 'category': np.int8})
train_labels = pd.concat([train, labels], axis=1)
train_labels = train_labels.assign(num_dim=pd.Series(input_dim_X))
train_labels.sort_values(by='num_dim', ascending=False,
inplace=True) # Now we have all training data sorted by the number of their dimensions
train = train_labels.drop(['sample_id', 'category', 'num_dim'], axis=1)
X = train.values
y = train_labels['category'].values
return X, y
class roc_callback(Callback):
    """
    Keras callback that reports the ROC AUC score on the validation set
    after every epoch.

    The base ``Callback`` class already provides no-op implementations of
    every hook, so only ``on_epoch_end`` is overridden here.
    """
    def __init__(self, training_data, validation_data):
        # training_data is accepted for backward compatibility but unused.
        super(roc_callback, self).__init__()
        self.x_val = validation_data[0]
        self.y_val = validation_data[1]
    def on_epoch_end(self, epoch, logs=None):
        # Predict on the held-out set and print the AUC for this epoch.
        y_pred_val = self.model.predict(self.x_val)
        roc_val = roc_auc_score(self.y_val, y_pred_val)
        print('\rroc-auc_val: %s \n' % (str(round(roc_val, 5))), end=100 * ' ' + '\n')
def f_for_validating_model(x_train, y_train, x_valid, y_valid, params):
    """
    Build a 1-D convolutional network from the given hyper-parameters,
    train it on (x_train, y_train) while validating on (x_valid, y_valid),
    and return the fitted model.
    """
    print(params)
    input_length = 4096
    # Whole architecture assembled as one ordered layer list:
    # byte embedding -> two strided conv stages -> max pooling -> two more
    # conv stages -> dense head with a sigmoid output for binary labels.
    architecture = [
        Embedding(256, 16, input_length=input_length),
        Dropout(params['conv_dropout_1']),
        Conv1D(48, 32, strides=4, padding='same', dilation_rate=1,
               activation='relu', use_bias=True,
               kernel_initializer='glorot_uniform', bias_initializer='zeros'),
        Dropout(params['conv_dropout_2']),
        Conv1D(96, 32, strides=4, padding='same', dilation_rate=1,
               activation='relu', use_bias=True,
               kernel_initializer='glorot_uniform', bias_initializer='zeros'),
        Dropout(params['conv_dropout_3']),
        MaxPooling1D(pool_size=4, strides=None, padding='valid'),
        Conv1D(128, 16, strides=8, padding='same', dilation_rate=1,
               activation='relu', use_bias=True,
               kernel_initializer='glorot_uniform', bias_initializer='zeros'),
        Dropout(params['conv_dropout_4']),
        Conv1D(192, 16, strides=8, padding='same', dilation_rate=1,
               activation='relu', use_bias=True,
               kernel_initializer='glorot_uniform', bias_initializer='zeros'),
        Dropout(params['conv_dropout_5']),
        Flatten(),
        Dense(params['dense_dim'], activation='selu'),
        Dropout(params['dense_dropout']),
        Dense(1, activation='sigmoid'),
    ]
    model = Sequential()
    for layer in architecture:
        model.add(layer)
    model.compile(optimizer=params['optimizer'],
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train, y_train,
              batch_size=params['batch_size'],
              epochs=params['epochs'],
              validation_data=(x_valid, y_valid),
              callbacks=params['callbacks'])
    return model
def train_a_model(X, y, model_number=0, random_state=0):
    """
    Train one model on a random stratified train/validation split.

    A single stratified split (90% train / 10% validation) is drawn using
    the given random_state. During training, model weights are checkpointed
    after each epoch and the validation accuracy / AUC are reported; that
    information is later used to select which checkpoint to use for
    prediction on the test dataset.

    Parameters
    ----------
    X : feature matrix
    y : label vector
    model_number : int, tag used in the checkpoint file names
    random_state : int, seed controlling the train/validation split

    Returns
    -------
    The trained Keras model. (The original discarded the model; returning
    it is backward-compatible since the previous return value was None.)
    """
    sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=random_state)
    # n_splits=1: take the single split explicitly instead of relying on
    # loop variables leaking out of a one-iteration for-loop.
    train_index, valid_index = next(sss.split(X, y))
    x_train, x_valid = X[train_index], X[valid_index]
    y_train, y_valid = y[train_index], y[valid_index]
    # Save weights after every epoch, tagged with epoch number and val accuracy.
    checkpoint_filepath = "model_{}".format(model_number) + "-{epoch:02d}-{val_acc:.4f}.hdf5"
    checkpoint = ModelCheckpoint(checkpoint_filepath, monitor='val_acc', verbose=1,
                                 save_weights_only=True, mode='max')
    callbacks_list = [checkpoint,
                      roc_callback(training_data=(x_train, y_train),
                                   validation_data=(x_valid, y_valid))]
    params_for_validating_model = {
        'batch_size': 128,
        'conv_dropout_1': 0.2,
        'conv_dropout_2': 0.2,
        'conv_dropout_3': 0.2,
        'conv_dropout_4': 0.2,
        'conv_dropout_5': 0.2,
        'dense_dim': 64,
        'dense_dropout': 0.5,
        'epochs': 50,
        'optimizer': 'adam',
        'callbacks': callbacks_list
    }
    return f_for_validating_model(x_train, y_train, x_valid, y_valid,
                                  params_for_validating_model)
def _main():
    """Script entry point: load the training data and train one model."""
    train_features_path = r'train.csv'
    train_labels_path = r'train_label.csv'
    X, y = read_and_preprocess_training_data(train_features_path, train_labels_path)
    train_a_model(X, y, model_number=0, random_state=0)


if __name__ == "__main__":
    _main()