
[TensorFlow Study Notes] (8) Dropout


As in the previous few posts, this one is explained through a complete working example:

"""Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly."""import tensorflow as tffrom sklearn.datasets import load_digitsfrom sklearn.cross_validation import train_test_splitfrom sklearn.PReprocessing import LabelBinarizer# load datadigits = load_digits()X = digits.data    #����1��9������y = digits.targety = LabelBinarizer().fit_transform(y)X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3)def add_layer(inputs, in_size, out_size, layer_name, activation_function=None, ):    # add one more layer and return the output of this layer    Weights = tf.Variable(tf.random_normal([in_size, out_size]))    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, )    Wx_plus_b = tf.matmul(inputs, Weights) + biases    # here to dropout    Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)    if activation_function is None:        outputs = Wx_plus_b    else:        outputs = activation_function(Wx_plus_b, )    tf.histogram_summary(layer_name + '/outputs', outputs)    return outputs# define placeholder for inputs to networkkeep_prob = tf.placeholder(tf.float32)xs = tf.placeholder(tf.float32, [None, 64])  # 8x8ys = tf.placeholder(tf.float32, [None, 10])# add output layerl1 = add_layer(xs, 64, 50, 'l1', activation_function=tf.nn.tanh)prediction = add_layer(l1, 50, 10, 'l2', activation_function=tf.nn.softmax)# the loss between prediction and real datacross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),                                              reduction_indices=[1]))  # losstf.scalar_summary('loss', cross_entropy)train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)sess = tf.session()merged = tf.merge_all_summaries()# summary writer goes in heretrain_writer = tf.train.SummaryWriter("logs/train", sess.graph)test_writer = tf.train.SummaryWriter("logs/test", sess.graph)sess.run(tf.initialize_all_variables())for i in range(500):    # here to determine the keeping probability    sess.run(train_step, feed_dict={xs: X_train, ys: y_train, keep_prob: 0.5})    if i % 50 == 0:        # record loss        train_result = sess.run(merged, feed_dict={xs: X_train, ys: y_train, keep_prob: 1})        test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test, keep_prob: 1})        train_writer.add_summary(train_result, i)        test_writer.add_summary(test_result, i)

