# TensorFlow 1.x example: train a 2-3-1 fully connected network on synthetic data.
import tensorflow as tf
from numpy.random import RandomState

# Number of samples fed to the optimizer per training step.
batch_size = 8

# Weights of a 2-3-1 fully connected network (no biases, no activation).
# Fixed seed so the initial values — and therefore the run — are reproducible.
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))

# Placeholders for a variable-size batch of 2-feature inputs and 1-dim labels.
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')

# Forward pass: two linear layers.
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)

# Cross-entropy loss; clipping keeps log() away from 0 and values > 1.
cross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)

# Synthetic dataset: 128 random 2-D points; label is 1 when x1 + x2 < 1.
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Network parameters before training:
    # w1 = [[-0.8113182   1.4845988   0.06532937]
    #       [-2.4427042   0.0992484   0.5912243 ]]
    # w2 = [[-0.8113182 ]
    #       [ 1.4845988 ]
    #       [ 0.06532937]]
    STEPS = 5000
    for i in range(STEPS):
        # Cycle through the dataset in consecutive mini-batches.
        start = (i * batch_size) % dataset_size
        end = min(start + batch_size, dataset_size)
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
        if i % 1000 == 0:
            # Periodically evaluate the loss over the full dataset.
            total_cross_entropy = sess.run(cross_entropy,
                                           feed_dict={x: X, y_: Y})
            print("After %d training step(s),cross entropy on all data is %g"
                  % (i, total_cross_entropy))
    # Expected output:
    # After 0 training step(s),cross entropy on all data is 0.0674925
    # After 1000 training step(s),cross entropy on all data is 0.0163385
    # After 2000 training step(s),cross entropy on all data is 0.00907547
    # After 3000 training step(s),cross entropy on all data is 0.00714436
    # After 4000 training step(s),cross entropy on all data is 0.00578471
    # The cross entropy shrinks steadily, i.e. the predictions move
    # closer to the true labels.
    print(sess.run(w1))
    print(sess.run(w2))
    # Network parameters after training:
    # w1 = [[-1.9618274  2.582354   1.6820377]
    #       [-3.4681718  1.0698233  2.11789  ]]
    # w2 = [[-1.8247149]
    #       [ 2.6854665]
    #       [ 1.418195 ]]