-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathAutoEncoder.py
More file actions
137 lines (101 loc) · 4.6 KB
/
AutoEncoder.py
File metadata and controls
137 lines (101 loc) · 4.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
#coding=utf-8
""" Auto-Encoder Neural Network (AE)
Using auto-encoder neural network to reconstruct digit images.
Author: Fenng
Project: https://github.com/floperry/Neural-Network/
"""
import tensorflow as tf
class autoencoder(object):
    """Symmetric fully-connected auto-encoder (TensorFlow 1.x graph style).

    ``nodes`` lists the encoder layer sizes, input dimension first
    (e.g. ``[784, 256, 64]`` encodes 784 -> 256 -> 64); the decoder
    mirrors the same sizes back to the input dimension. The bottleneck
    (pre-activation output of the last encoder layer) is exposed as
    ``hidden_feature``.

    Args:
        sess: a ``tf.Session`` used for all graph execution.
        name: variable-scope name, so several instances can coexist in
            one graph without variable-name collisions.
        nodes: list of at least two layer sizes (input dimension first).
    """

    def __init__(self, sess, name, nodes):
        self._sess = sess
        self._name = name
        self._nodes = nodes
        self._build_net()

    @property
    def sess(self):
        return self._sess

    @property
    def name(self):
        return self._name

    @property
    def nodes(self):
        return self._nodes

    def _build_net(self):
        """Build the encoder/decoder graph, loss, and optimizer.

        Raises:
            ValueError: if fewer than two layer sizes were given.
        """
        if len(self._nodes) < 2:
            raise ValueError('At least 2 encode layers should be specified.')
        with tf.variable_scope(self._name):
            # define placeholders
            self.X = tf.placeholder(tf.float32, [None, self._nodes[0]])
            self.learning_rate = tf.placeholder(tf.float32)
            self.keep_prob = tf.placeholder(tf.float32)
            layer = self.X
            # encoder: every transition except the last gets ReLU + dropout
            for i in range(len(self._nodes) - 2):
                layer = self._dense_layer(shape=self._nodes[i:i + 2], inputs=layer,
                                          layer_name='encoder_' + str(i + 1))
                layer = self._activation(layer)
                layer = tf.nn.dropout(layer, keep_prob=self.keep_prob)
            # last encoder transition: keep the PRE-activation output as the
            # learned feature (bottleneck), then apply the nonlinearity
            layer = self._dense_layer(shape=self._nodes[len(self._nodes) - 2:len(self._nodes)], inputs=layer,
                                      layer_name='encoder_' + str(len(self._nodes) - 1))
            self.hidden_feature = layer
            layer = self._activation(layer)
            # decoder mirrors the encoder sizes in reverse order.
            # Note: no dropout on decoder layers (matches original design).
            reversed_nodes = self._nodes[::-1]
            for i in range(len(self._nodes) - 2):
                layer = self._dense_layer(shape=reversed_nodes[i:i + 2], inputs=layer,
                                          layer_name='decoder_' + str(i + 1))
                layer = self._activation(layer)
            # final reconstruction layer is linear (no activation)
            self.outputs = self._dense_layer(shape=reversed_nodes[len(self._nodes) - 2:len(self._nodes)],
                                             inputs=layer, layer_name='decoder_' + str(len(self._nodes) - 1))
            # mean-squared reconstruction error against the raw input
            self.loss = self._loss_function(self.outputs, self.X)
            self.optimizer = self._optimizer(self.loss)
            # NOTE(review): initializes ALL global variables, not only this
            # scope's -- clobbers previously initialized values if another
            # model shares the session; kept as-is for compatibility.
            self._sess.run(tf.global_variables_initializer())

    def _dense_layer(self, shape, inputs, layer_name):
        """Affine layer ``inputs @ W + b``.

        W is Xavier-initialized via ``tf.get_variable``; b is a plain
        ``tf.Variable`` with random-normal init (kept so checkpoint
        variable names stay identical to the original graph).
        """
        with tf.variable_scope(layer_name):
            W = tf.get_variable(layer_name + '_W', shape=shape,
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.random_normal([shape[-1]]))
            return tf.add(tf.matmul(inputs, W), b)

    def _activation(self, inputs):
        """Nonlinearity used throughout the network (ReLU)."""
        return tf.nn.relu(inputs)

    def _loss_function(self, predict, labels):
        """Mean squared error between reconstruction and target."""
        return tf.reduce_mean(tf.square(predict - labels))

    def _optimizer(self, loss):
        """Adam minimizer driven by the ``learning_rate`` placeholder."""
        return tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(loss)

    def train(self, x_data, epochs=100, batch_size=128, learning_rate=0.001, keep_prob=0.7):
        """Run minibatch training.

        Args:
            x_data: dataset exposing ``num_examples`` and
                ``next_batch(batch_size)`` (e.g. the MNIST feed from
                ``tensorflow.examples.tutorials``) -- labels are ignored.
            epochs: number of passes over the data.
            batch_size: minibatch size; a trailing partial batch is dropped.
            learning_rate: Adam learning rate fed each step.
            keep_prob: dropout keep probability for the encoder layers.

        Returns:
            list of per-epoch average losses.
        """
        losses = []
        for epoch in range(epochs):
            avg_loss = 0
            # floor division (was int(a / b)); identical for positive ints
            total_batch = x_data.num_examples // batch_size
            for _ in range(total_batch):
                batch_xs, _ = x_data.next_batch(batch_size)
                l, _ = self._sess.run([self.loss, self.optimizer],
                                      feed_dict={self.X: batch_xs,
                                                 self.learning_rate: learning_rate,
                                                 self.keep_prob: keep_prob})
                avg_loss += l / total_batch
            print("Epoch ", "%04d" %(epoch+1), "loss = ", "{:.9f}".format(avg_loss))
            losses.append(avg_loss)
        return losses

    def predict(self, x_data, keep_prob=1.0):
        """Return reconstructions for ``x_data`` (dropout off by default)."""
        return self._sess.run(self.outputs, feed_dict={self.X: x_data, self.keep_prob: keep_prob})

    def get_feature(self, x_data, keep_prob=1.0):
        """Return the bottleneck (pre-activation) features for ``x_data``."""
        return self._sess.run(self.hidden_feature, feed_dict={self.X: x_data, self.keep_prob: keep_prob})

    def save(self, path):
        """Save all saveable variables in the session to ``path``."""
        saver = tf.train.Saver()
        saver.save(self.sess, path)

    def load(self, path):
        """Restore variables previously written by :meth:`save`."""
        saver = tf.train.Saver()
        saver.restore(self.sess, path)