Where does the formula I've marked in the code below come from?
import tensorflow as tf
import numpy as np
# load MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
mnist_width = 28
n_visible = mnist_width * mnist_width
n_hidden = 500
corruption_level = 0.3
X = tf.placeholder("float", [None, n_visible], name='X')
mask = tf.placeholder("float", [None, n_visible], name='mask')
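# mask: per-pixel keep/drop pattern used to corrupt the input (1 = keep, 0 = drop)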
# create nodes for hidden variables
# Where does this formula come from???
########################################
W_init_max = 4 * np.sqrt(6. / (n_visible + n_hidden))
########################################
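# (For reference: the bound above has the form 4 * sqrt(6 / (fan_in + fan_out)),
#  which looks like the Glorot/Xavier uniform initialization range scaled by 4
#  for sigmoid units. With n_visible = 784 and n_hidden = 500 it evaluates to
#  4 * sqrt(6 / 1284) ≈ 0.273, so W starts uniform in roughly [-0.273, 0.273].)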
W_init = tf.random_uniform(shape=[n_visible, n_hidden],
                           minval=-W_init_max,
                           maxval=W_init_max)
W = tf.Variable(W_init, name='W')#shape:784x500
b = tf.Variable(tf.zeros([n_hidden]), name='b')
W_prime = tf.transpose(W)
b_prime = tf.Variable(tf.zeros([n_visible]), name='b_prime')
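# note: W_prime is tied to W (the decoder reuses the transposed encoder weights),
# so the only trainable variables are W, b and b_prime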
def model(X, mask, W, b, W_prime, b_prime):
    tilde_X = mask * X  # corrupted X
    Y = tf.nn.sigmoid(tf.matmul(tilde_X, W) + b)  # hidden state
    Z = tf.nn.sigmoid(tf.matmul(Y, W_prime) + b_prime)  # reconstructed input
    return Z
# build model graph
Z = model(X, mask, W, b, W_prime, b_prime)
# create cost function
cost = tf.reduce_sum(tf.pow(X - Z, 2)) # minimize squared error
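# note: reduce_sum sums over every example and pixel, so the value printed below
# grows with the number of images fed in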
train_op = tf.train.GradientDescentOptimizer(0.02).minimize(cost) # construct an optimizer
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()
    for i in range(100):
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX)+1, 128)):  # ???
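            # (the zip above pairs slice indices (0, 128), (128, 256), ...,
            #  so each iteration takes one 128-image mini-batch from trX)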
            input_ = trX[start:end]
            mask_np = np.random.binomial(1, 1 - corruption_level, input_.shape)
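            # each mask entry is 1 with probability 0.7 and 0 with probability 0.3,
            # so about 30% of the input pixels get zeroed out (the denoising corruption)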
            sess.run(train_op, feed_dict={X: input_, mask: mask_np})
        mask_np = np.random.binomial(1, 1 - corruption_level, teX.shape)
        print(i, sess.run(cost, feed_dict={X: teX, mask: mask_np}))
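
If it helps to see the marked initializer in isolation, here is a minimal NumPy-only sketch of the same bound (the helper name glorot_uniform_bound is mine, not from the code above):

import numpy as np

def glorot_uniform_bound(n_in, n_out, sigmoid_scale=4.0):
    # same expression as the marked line: 4 * sqrt(6 / (n_in + n_out))
    return sigmoid_scale * np.sqrt(6.0 / (n_in + n_out))

bound = glorot_uniform_bound(784, 500)
W = np.random.uniform(-bound, bound, size=(784, 500))
print(bound, W.min(), W.max())  # bound ≈ 0.273; all samples stay inside +/- bound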