Neural Network: Code Implementation
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('data/', one_hot=True)
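Note: `tensorflow.examples.tutorials` was removed in TensorFlow 2.x, so this import only works on a TF 1.x install. If it fails, here is a minimal sketch of an equivalent loader using `tf.keras.datasets` (the variable names are my own, not from the original post); with this path you would batch the arrays yourself instead of calling `mnist.train.next_batch`:

# Hedged alternative loader -- not the original code path.
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
train_x = train_x.reshape(-1, 784).astype("float32") / 255.0  # flatten and scale
test_x = test_x.reshape(-1, 784).astype("float32") / 255.0
train_y = np.eye(10)[train_y]  # one-hot encode the labels
test_y = np.eye(10)[test_y]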
# Network architecture: input layer, two hidden layers, output layer
n_hidden_1 = 256  # neurons in the first hidden layer
n_hidden_2 = 128  # neurons in the second hidden layer
n_input = 784     # features per sample (28*28 pixels)
n_classes = 10    # number of classes

# Inputs and outputs
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

# Network parameters (weights w, biases b)
std = 0.1  # standard deviation for the Gaussian initializer
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=std)),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=std)),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=std))
}  # w1 is a 784*256 matrix, w2 is 256*128; Gaussian initialization
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}  # b1 has 256 entries, b2 has 128, out has 10
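As a sanity check on these shapes, the weight matrices hold 784*256 + 256*128 + 128*10 = 200,704 + 32,768 + 1,280 = 234,752 values, and the biases add 256 + 128 + 10 = 394 more, for 235,146 trainable parameters in total.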
# Forward propagation (input X, weights W, biases b)
def multilayer_forward(_X, _weights, _biases):
    # compute X*W + b, then apply the sigmoid activation
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2']))
    # output layer: raw logits, no activation (the loss applies softmax itself)
    return tf.matmul(layer_2, _weights['out']) + _biases['out']
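For intuition, here is the same forward pass written in plain NumPy (an illustrative sketch, not part of the original code; `sigmoid` is defined inline):

def numpy_forward(X, W1, b1, W2, b2, W_out, b_out):
    # NumPy mirror of multilayer_forward, showing the shape flow
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    layer_1 = sigmoid(X @ W1 + b1)        # (batch, 784) -> (batch, 256)
    layer_2 = sigmoid(layer_1 @ W2 + b2)  # (batch, 256) -> (batch, 128)
    return layer_2 @ W_out + b_out        # (batch, 128) -> (batch, 10), raw logits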
# One forward pass: the predicted logits
pred = multilayer_forward(x, weights, biases)

# Loss: cross-entropy between the true labels and the predicted logits
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
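`softmax_cross_entropy_with_logits_v2` fuses the softmax and the log-loss into one numerically stable op. A rough NumPy equivalent, for illustration only:

def softmax_cross_entropy_np(labels, logits):
    # subtract the per-row max before exponentiating, for numerical stability
    z = logits - logits.max(axis=1, keepdims=True)
    log_softmax = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    return -(labels * log_softmax).sum(axis=1)  # one loss value per example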
# Gradient descent to minimize the loss
optm = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)
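Plain gradient descent at this learning rate converges slowly; a common substitution (my suggestion, not from the original post) is Adam:

# Hedged alternative: Adam typically reaches good accuracy in far fewer epochs.
# optm = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)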
# Accuracy
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))  # per example: predicted class == true class?
accr = tf.reduce_mean(tf.cast(corr, "float"))         # fraction of correct predictions
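To see what these two ops compute, here is the same logic on a tiny made-up batch (hypothetical numbers):

p = np.array([[0.1, 0.8, 0.1],
              [0.6, 0.3, 0.1]])   # two fake prediction rows
t = np.array([[0, 1, 0],
              [0, 0, 1]])         # their one-hot labels
acc = (p.argmax(axis=1) == t.argmax(axis=1)).astype("float32").mean()
print(acc)  # 0.5 -- first example correct, second wrong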
# Initialize all variables
init = tf.global_variables_initializer()
training_epochs = 30  # passes over the full training set
batch_size = 46       # examples per mini-batch
display_step = 5      # log progress every 5 epochs
# LAUNCH THE GRAPH
sess = tf.Session()
sess.run(init)
# Training loop
for epoch in range(training_epochs):
    avg_cost = 0.0
    total_batch = int(mnist.train.num_examples / batch_size)
    # iterate over the mini-batches
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feeds = {x: batch_xs, y: batch_ys}  # fill the placeholders
        sess.run(optm, feed_dict=feeds)     # one optimization step
        avg_cost += sess.run(cost, feed_dict=feeds)
    avg_cost = avg_cost / total_batch
    # report progress
    if epoch % display_step == 0:
        print("Epoch: %02d/%02d cost: %.6f" % (epoch, training_epochs, avg_cost))
        feeds = {x: batch_xs, y: batch_ys}
        train_acc = sess.run(accr, feed_dict=feeds)  # accuracy on the last training batch
        print("TRAIN ACCURACY: %.3f" % (train_acc))
        feeds = {x: mnist.test.images, y: mnist.test.labels}
        test_acc = sess.run(accr, feed_dict=feeds)   # accuracy on the test set
        print("TEST ACCURACY: %.3f" % (test_acc))
print("Done")
Output:
The loss decreases steadily across epochs, and both the training-batch and test-set accuracies improve accordingly.
Summary
A fully connected network with two sigmoid hidden layers (256 and 128 units) and a softmax cross-entropy loss, trained with mini-batch gradient descent, is enough to classify MNIST digits with steadily improving accuracy.