forked from omarsayed7/Deep-Emotion
-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
83 lines (72 loc) · 3.1 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
# from __future__ import print_function
import os
import argparse
from datetime import datetime
import tensorflow as tf
from deep_emotion import Deep_Emotion
from generate_data import Generate_data
# Seed TensorFlow's global RNG so weight init / shuffling are reproducible.
tf.random.set_seed(1234)

# Timestamped TensorBoard run directory, e.g. logs/fit/20240101-120000.
logdir = os.path.join('logs', 'fit', datetime.now().strftime("%Y%m%d-%H%M%S"))

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Configuration of setup and training process")
    # BUG FIX: the original used type=bool for the on/off flags.  argparse
    # applies bool() to the raw string, and bool(s) is True for ANY non-empty
    # string — including the literal "False" — so '--train False' still
    # trained.  action='store_true' gives real presence-based flags.
    parser.add_argument('-s', '--setup', action='store_true',
                        help='setup the dataset for the first time')
    parser.add_argument('-d', '--data', type=str, required=True,
                        help='data folder that contains data files that downloaded from kaggle (train.csv and test.csv)')
    parser.add_argument('-hparams', '--hyperparams', action='store_true',
                        help='pass to override the hyperparameters e.g (batch size, LR, num. of epochs)')
    parser.add_argument('-e', '--epochs', type=int, help='number of epochs')
    parser.add_argument('-lr', '--learning_rate', type=float, help='value of learning rate')
    parser.add_argument('-bs', '--batch_size', type=int, help='training/validation batch size')
    parser.add_argument('-t', '--train', action='store_true', help='pass to run training')
    args = parser.parse_args()

    if args.setup:
        # One-time preprocessing: split the kaggle CSVs and dump images to disk.
        generate_dataset = Generate_data(args.data)
        generate_dataset.split_test()
        generate_dataset.save_images('train')
        # generate_dataset.save_images('test')
        generate_dataset.save_images('val')

    # Hyperparameters: the Deep-Emotion paper's values unless overridden.
    # ROBUSTNESS FIX: fall back per-value so '--hyperparams -e 100' no longer
    # leaves lr/batchsize as None (which crashed the optimizer / dataset code).
    if args.hyperparams:
        epochs = args.epochs if args.epochs is not None else 500
        lr = args.learning_rate if args.learning_rate is not None else 0.005
        batchsize = args.batch_size if args.batch_size is not None else 32
    else:  # hyperparameters as mentioned in the paper
        epochs = 500
        lr = 0.005
        batchsize = 32

    if args.train:
        net = Deep_Emotion()
        # net.to(device)
        print("Model architecture: ", net)

        # Images are expected under <data>/train/<class>/ and <data>/val/<class>/,
        # as written by the --setup step; labels are inferred from folder names.
        train_img_dir = os.path.join(args.data, 'train')
        validation_img_dir = os.path.join(args.data, 'val')

        # FER2013 frames are 48x48 single-channel, hence grayscale + (48, 48).
        train_dataset = tf.keras.utils.image_dataset_from_directory(
            directory=train_img_dir,
            color_mode='grayscale',
            batch_size=batchsize,
            image_size=(48, 48)
        )
        validation_dataset = tf.keras.utils.image_dataset_from_directory(
            directory=validation_img_dir,
            color_mode='grayscale',
            batch_size=batchsize,
            image_size=(48, 48)
        )

        # from_logits=True: the labels are sparse integer class ids and the
        # model is assumed to emit raw logits — TODO confirm Deep_Emotion has
        # no softmax output layer.
        loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr)

        # TensorBoard: log histograms every epoch and weight images to logdir.
        tensorboard_callback = tf.keras.callbacks.TensorBoard(
            log_dir=logdir,
            histogram_freq=1,
            write_images=True
        )

        net.compile(
            optimizer=optimizer,
            loss=loss_object,
            metrics=['accuracy']
        )
        net.fit(
            x=train_dataset,
            epochs=epochs,
            validation_data=validation_dataset,
            callbacks=[tensorboard_callback]
        )