
"Neural Networks and Deep Learning" Programming Notes


Environment

Anaconda is recommended; download it from https://www.anaconda.com/download/.

The code below targets Theano with Python 3.5.
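As a quick sanity check before running anything (my sketch, not part of the original notes), confirm the interpreter version and how Theano is configured; both attributes below are standard Theano config fields:

import sys
import theano

# This code targets Python 3.5-era Theano; newer Theano releases renamed
# some of the modules used in network3.py below (e.g. downsample).
print(sys.version)
print(theano.config.device, theano.config.floatX)  # 'cpu'/'gpu', 'float64'/'float32'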

Project structure

(The project-structure image from the original post did not survive; from the code below, the project consists of a data loader, three network implementations, and a test script, with the MNIST data under ./data/.)

Loading the data

import gzip
import pickle

import numpy as np


def load_data():
    """Return the raw MNIST data as a tuple (training_data, validation_data, test_data)."""
    f = gzip.open('./data/mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding='bytes')
    f.close()
    return (training_data, validation_data, test_data)


def load_data_wrapper():
    """Reshape the raw data into the format the network code expects:
    inputs as (784, 1) column vectors, training labels as (10, 1) one-hot vectors."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return (training_data, validation_data, test_data)


def vectorized_result(j):
    """Turn a digit j into a 10-dimensional one-hot column vector."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
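A short usage sketch of the loader (mine, not from the original post). Note that under Python 3, zip returns a one-shot iterator, which is why the test code later wraps the datasets in list():

training_data, validation_data, test_data = load_data_wrapper()
training_data = list(training_data)  # materialize the zip so it can be reused

x, y = training_data[0]
print(x.shape)             # (784, 1): a 28x28 image flattened into a column vector
print(y.shape)             # (10, 1): one-hot label produced by vectorized_result
print(len(training_data))  # 50000 training examples in mnist.pkl.gz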

Neural networks

Version 1: plain stochastic gradient descent (network.py)

'''
@author: liuxing
'''
import random

import numpy as np


class Network(object):

    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, a):
        """Return the network's output for input a."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            test_data=None):
        """Train the network using mini-batch stochastic gradient descent."""
        if test_data: n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))

    def update_mini_batch(self, mini_batch, eta):
        """Apply one gradient-descent step, using backprop on a single mini-batch."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return (nabla_b, nabla_w), the gradient of the cost for a single example."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = self.cost_derivative(activations[-1], y) * \
            sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def evaluate(self, test_data):
        """Count the test inputs for which the network predicts the correct digit."""
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def cost_derivative(self, output_activations, y):
        return (output_activations-y)


def sigmoid(z):
    return 1.0/(1.0+np.exp(-z))


def sigmoid_prime(z):
    return sigmoid(z)*(1-sigmoid(z))
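A minimal smoke test for this version (my sketch, assuming the two files above are saved as mnist_loader.py and network.py); the hyperparameters follow the book's Chapter 1 example:

import mnist_loader
import network

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# 784 input neurons, one hidden layer of 30, 10 output neurons
net = network.Network([784, 30, 10])
# list(...) because SGD calls len() and random.shuffle(), which zip objects do not support
net.SGD(list(training_data), 30, 10, 3.0, test_data=list(test_data))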

Version 2: cross-entropy cost, L2 regularization, and better weight initialization (network2.py)

'''
@author: liuxing
'''
import json
import random
import sys

import numpy as np


class QuadraticCost(object):

    @staticmethod
    def fn(a, y):
        return 0.5*np.linalg.norm(a-y)**2

    @staticmethod
    def delta(z, a, y):
        return (a-y) * sigmoid_prime(z)


class CrossEntropyCost(object):

    @staticmethod
    def fn(a, y):
        # nan_to_num handles 0*log(0), which should evaluate to 0
        return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))

    @staticmethod
    def delta(z, a, y):
        return (a-y)


class Network(object):

    def __init__(self, sizes, cost=CrossEntropyCost):
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.default_weight_initializer()
        self.cost = cost

    def default_weight_initializer(self):
        """Gaussian weights scaled by 1/sqrt(fan-in), which keeps neurons from saturating early."""
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)/np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def large_weight_initializer(self):
        """Unscaled Gaussian weights, as in version 1 (kept for comparison)."""
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def feedforward(self, a):
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            lmbda=0.0,
            evaluation_data=None,
            monitor_evaluation_cost=False,
            monitor_evaluation_accuracy=False,
            monitor_training_cost=False,
            monitor_training_accuracy=False):
        if evaluation_data: n_data = len(evaluation_data)
        n = len(training_data)
        evaluation_cost, evaluation_accuracy = [], []
        training_cost, training_accuracy = [], []
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(
                    mini_batch, eta, lmbda, len(training_data))
            print("Epoch %s training complete" % j)
            if monitor_training_cost:
                cost = self.total_cost(training_data, lmbda)
                training_cost.append(cost)
                print("Cost on training data: {}".format(cost))
            if monitor_training_accuracy:
                accuracy = self.accuracy(training_data, convert=True)
                training_accuracy.append(accuracy)
                print("Accuracy on training data: {} / {}".format(accuracy, n))
            if monitor_evaluation_cost:
                cost = self.total_cost(evaluation_data, lmbda, convert=True)
                evaluation_cost.append(cost)
                print("Cost on evaluation data: {}".format(cost))
            if monitor_evaluation_accuracy:
                accuracy = self.accuracy(evaluation_data)
                evaluation_accuracy.append(accuracy)
                print("Accuracy on evaluation data: {} / {}".format(
                    accuracy, n_data))
            print()
        return evaluation_cost, evaluation_accuracy, \
            training_cost, training_accuracy

    def update_mini_batch(self, mini_batch, eta, lmbda, n):
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # the (1 - eta*lmbda/n) factor is L2 weight decay
        self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x]  # list to store all the activations, layer by layer
        zs = []  # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = (self.cost).delta(zs[-1], activations[-1], y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def accuracy(self, data, convert=False):
        """convert=True for training data (one-hot labels), False for validation/test data."""
        if convert:
            results = [(np.argmax(self.feedforward(x)), np.argmax(y))
                       for (x, y) in data]
        else:
            results = [(np.argmax(self.feedforward(x)), y)
                       for (x, y) in data]
        return sum(int(x == y) for (x, y) in results)

    def total_cost(self, data, lmbda, convert=False):
        cost = 0.0
        for x, y in data:
            a = self.feedforward(x)
            if convert: y = vectorized_result(y)
            cost += self.cost.fn(a, y)/len(data)
        cost += 0.5*(lmbda/len(data))*sum(
            np.linalg.norm(w)**2 for w in self.weights)
        return cost

    def save(self, filename):
        """Save the network to a JSON file."""
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases],
                "cost": str(self.cost.__name__)}
        f = open(filename, "w")
        json.dump(data, f)
        f.close()


#### Loading a Network
def load(filename):
    f = open(filename, "r")
    data = json.load(f)
    f.close()
    cost = getattr(sys.modules[__name__], data["cost"])
    net = Network(data["sizes"], cost=cost)
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net


def vectorized_result(j):
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e


def sigmoid(z):
    return 1.0/(1.0+np.exp(-z))


def sigmoid_prime(z):
    return sigmoid(z)*(1-sigmoid(z))
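The key difference between the two cost classes is in delta: the cross-entropy gradient has no sigmoid_prime(z) factor, so learning does not slow down when output neurons saturate. A small numeric illustration (my sketch, assuming the file above is saved as network2.py):

import numpy as np
from network2 import QuadraticCost, CrossEntropyCost

a = np.array([[0.9], [0.1]])   # network outputs
y = np.array([[1.0], [0.0]])   # targets
z = np.array([[2.0], [-2.0]])  # weighted inputs at the output layer

print(QuadraticCost.fn(a, y))        # 0.5*||a-y||^2 = 0.01
print(CrossEntropyCost.fn(a, y))     # ~0.2107
print(QuadraticCost.delta(z, a, y))      # (a-y) scaled down by sigmoid'(z)
print(CrossEntropyCost.delta(z, a, y))   # exactly a-y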

Version 3: Theano-based convolutional network (network3.py)

'''
@author: liuxing
'''
import gzip
import pickle

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.nnet import softmax
from theano.tensor.nnet import sigmoid
from theano.tensor import shared_randomstreams
from theano.tensor.signal import downsample


#### Activation functions
def linear(z): return z
def ReLU(z): return T.maximum(0.0, z)


GPU = True
if GPU:
    print("Trying to run under a GPU. If this is not desired, then modify "+\
          "network3.py\nto set the GPU flag to False.")
    try: theano.config.device = 'gpu'
    except: pass  # it's already set
    theano.config.floatX = 'float32'
else:
    print("Running with a CPU. If this is not desired, then modify "+\
          "network3.py to set\nthe GPU flag to True.")


#### Load the MNIST data
def load_data_shared(filename="./data/mnist.pkl.gz"):
    f = gzip.open(filename, 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding='bytes')
    f.close()
    def shared(data):
        """Place the data in shared variables so Theano can copy it to the GPU."""
        shared_x = theano.shared(
            np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
        shared_y = theano.shared(
            np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
        return shared_x, T.cast(shared_y, "int32")
    return [shared(training_data), shared(validation_data), shared(test_data)]


#### Main class used to construct and train networks
class Network(object):

    def __init__(self, layers, mini_batch_size):
        self.layers = layers
        self.mini_batch_size = mini_batch_size
        self.params = [param for layer in self.layers for param in layer.params]
        self.x = T.matrix("x")
        self.y = T.ivector("y")
        init_layer = self.layers[0]
        init_layer.set_inpt(self.x, self.x, self.mini_batch_size)
        for j in range(1, len(self.layers)):
            prev_layer, layer = self.layers[j-1], self.layers[j]
            layer.set_inpt(
                prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)
        self.output = self.layers[-1].output
        self.output_dropout = self.layers[-1].output_dropout

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            validation_data, test_data, lmbda=0.0):
        training_x, training_y = training_data
        validation_x, validation_y = validation_data
        test_x, test_y = test_data

        # integer division: these counts index mini-batches, so they must be ints
        # (the original used /, which yields floats in Python 3 and breaks range)
        num_training_batches = size(training_data)//mini_batch_size
        num_validation_batches = size(validation_data)//mini_batch_size
        num_test_batches = size(test_data)//mini_batch_size

        # define the (regularized) cost, symbolic gradients, and updates
        l2_norm_squared = sum([(layer.w**2).sum() for layer in self.layers])
        cost = self.layers[-1].cost(self)+\
               0.5*lmbda*l2_norm_squared/num_training_batches
        grads = T.grad(cost, self.params)
        updates = [(param, param-eta*grad)
                   for param, grad in zip(self.params, grads)]

        # functions to train a mini-batch, and to compute accuracies
        i = T.lscalar()  # mini-batch index
        train_mb = theano.function(
            [i], cost, updates=updates,
            givens={
                self.x:
                training_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
                self.y:
                training_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
            })
        validate_mb_accuracy = theano.function(
            [i], self.layers[-1].accuracy(self.y),
            givens={
                self.x:
                validation_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
                self.y:
                validation_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
            })
        test_mb_accuracy = theano.function(
            [i], self.layers[-1].accuracy(self.y),
            givens={
                self.x:
                test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
                self.y:
                test_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
            })
        self.test_mb_predictions = theano.function(
            [i], self.layers[-1].y_out,
            givens={
                self.x:
                test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
            })
        # Do the actual training
        best_validation_accuracy = 0.0
        for epoch in range(epochs):
            # the original called a custom frange(num_training_batches, 0.0) here,
            # which iterates zero times; with integer batch counts plain range works
            for minibatch_index in range(num_training_batches):
                iteration = num_training_batches*epoch+minibatch_index
                if iteration % 1000 == 0:
                    print("Training mini-batch number {0}".format(iteration))
                cost_ij = train_mb(minibatch_index)
                if (iteration+1) % num_training_batches == 0:
                    validation_accuracy = np.mean(
                        [validate_mb_accuracy(j) for j in range(num_validation_batches)])
                    print("Epoch {0}: validation accuracy {1:.2%}".format(
                        epoch, validation_accuracy))
                    if validation_accuracy >= best_validation_accuracy:
                        print("This is the best validation accuracy to date.")
                        best_validation_accuracy = validation_accuracy
                        best_iteration = iteration
                        if test_data:
                            test_accuracy = np.mean(
                                [test_mb_accuracy(j) for j in range(num_test_batches)])
                            print('The corresponding test accuracy is {0:.2%}'.format(
                                test_accuracy))
        print("Finished training network.")
        print("Best validation accuracy of {0:.2%} obtained at iteration {1}".format(
            best_validation_accuracy, best_iteration))
        print("Corresponding test accuracy of {0:.2%}".format(test_accuracy))


class ConvPoolLayer(object):
    """Combined convolutional and max-pooling layer."""

    def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
                 activation_fn=sigmoid):
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.poolsize = poolsize
        self.activation_fn = activation_fn
        # initialize weights and biases
        n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
        self.w = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),
                dtype=theano.config.floatX),
            borrow=True)
        self.b = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
                dtype=theano.config.floatX),
            borrow=True)
        self.params = [self.w, self.b]

    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape(self.image_shape)
        conv_out = conv.conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            image_shape=self.image_shape)
        pooled_out = downsample.max_pool_2d(
            input=conv_out, ds=self.poolsize, ignore_border=True)
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output  # no dropout in the convolutional layers


class FullyConnectedLayer(object):

    def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        self.p_dropout = p_dropout
        # Initialize weights and biases
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]

    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = self.activation_fn(
            (1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = self.activation_fn(
            T.dot(self.inpt_dropout, self.w) + self.b)

    def accuracy(self, y):
        return T.mean(T.eq(y, self.y_out))


class SoftmaxLayer(object):

    def __init__(self, n_in, n_out, p_dropout=0.0):
        self.n_in = n_in
        self.n_out = n_out
        self.p_dropout = p_dropout
        # Initialize weights and biases
        self.w = theano.shared(
            np.zeros((n_in, n_out), dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.zeros((n_out,), dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]

    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)

    def cost(self, net):
        """Return the log-likelihood cost."""
        return -T.mean(T.log(self.output_dropout)[T.arange(net.y.shape[0]), net.y])

    def accuracy(self, y):
        return T.mean(T.eq(y, self.y_out))


#### Miscellanea
def size(data):
    """Return the number of examples in a shared dataset."""
    return data[0].get_value(borrow=True).shape[0]


def dropout_layer(layer, p_dropout):
    srng = shared_randomstreams.RandomStreams(
        np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1-p_dropout, size=layer.shape)
    return layer*T.cast(mask, theano.config.floatX)
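Beyond the fully connected test in the next section, this version supports convolutional architectures. A sketch in the style of the book's Chapter 6 example, assuming the file above is saved as network3.py (the 28x28 input with 5x5 filters gives 24x24 feature maps, and 2x2 pooling reduces them to 12x12):

import network3
from network3 import Network, ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer

training_data, validation_data, test_data = network3.load_data_shared()
mini_batch_size = 10
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),  # 20 feature maps, 5x5 receptive fields
                  poolsize=(2, 2)),
    FullyConnectedLayer(n_in=20*12*12, n_out=100),  # 20 maps of 12x12 after pooling
    SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)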

Test code

To run a particular version, just uncomment the corresponding block.

'''
@author: liuxing
'''
# # Test network
# import mnist_loader
# training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# import network
# net = network.Network([784, 10])
# net.SGD(list(training_data), 5, 10, 5.0, test_data=list(test_data))

# Test network2
from com.tensorflowTest.network import mnist_loader
from com.tensorflowTest.network import network2

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
# pick one architecture; the original assigned both, so only the second took effect
# net = network2.Network([784, 30, 10])
net = network2.Network([784, 30, 30, 10])
net.SGD(list(training_data), 30, 10, 0.1,
        lmbda=5.0,
        evaluation_data=list(validation_data),
        monitor_evaluation_accuracy=True)

# # Test network3
# import network3
# from network3 import Network
# from network3 import FullyConnectedLayer, SoftmaxLayer
# training_data, validation_data, test_data = network3.load_data_shared()
# mini_batch_size = 10
# net = Network([FullyConnectedLayer(n_in=784, n_out=100),
#                SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
# net.SGD(training_data, 60, mini_batch_size, 0.1,
#         validation_data, test_data)

Reference book and data source

The code in these notes follows Michael Nielsen's book "Neural Networks and Deep Learning"; the data source is the MNIST handwritten-digit dataset, loaded from ./data/mnist.pkl.gz.
