Andrew Ng Deep Learning Coursework 1
Coursera link
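The code below imports load_dataset from the course helper module lr_utils.py, which is not shown in the post. For reference, a typical version of that helper, assuming the standard course HDF5 files and dataset keys (64×64 RGB cat/non-cat images), looks like the following sketch:

import numpy as np
import h5py

def load_dataset():
    # Assumed file paths and keys from the standard course dataset
    train_dataset = h5py.File("datasets/train_catvnoncat.h5", "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # image data
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # labels

    test_dataset = h5py.File("datasets/test_catvnoncat.h5", "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])

    classes = np.array(test_dataset["list_classes"][:])  # [b'non-cat', b'cat']

    # Reshape labels to row vectors of shape (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes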
import numpy as np
import matplotlib.pyplot as plt
import h5py
from lr_utils import load_dataset

train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()

m_train = train_set_y.shape[1]      # number of training examples
m_test = test_set_y.shape[1]        # number of test examples
num_px = train_set_x_orig.shape[1]  # width/height of each image (both 64)

# Inspect what we loaded
print("Number of training examples: m_train = " + str(m_train))
print("Number of test examples: m_test = " + str(m_test))
print("Height/width of each image: num_px = " + str(num_px))
print("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print("train_set_x shape: " + str(train_set_x_orig.shape))
print("train_set_y shape: " + str(train_set_y.shape))
print("test_set_x shape: " + str(test_set_x_orig.shape))
print("test_set_y shape: " + str(test_set_y.shape))

# Flatten and transpose the training and test images,
# so each column is one example of shape (num_px * num_px * 3, 1)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T

print("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print("train_set_y shape: " + str(train_set_y.shape))
print("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print("test_set_y shape: " + str(test_set_y.shape))
print("sanity check after reshaping: " + str(train_set_x_flatten[0:5, 0]))

# Standardize pixel values to [0, 1]
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def initialize_with_zeros(dim):
    """
    Creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.

    Argument:
    dim -- size of the w vector we want (number of parameters)

    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (the bias)
    """
    w = np.zeros((dim, 1))
    b = 0
    assert w.shape == (dim, 1)
    assert isinstance(b, float) or isinstance(b, int)
    return w, b


print("sigmoid(0) = " + str(sigmoid(0)))
print("sigmoid(9.2) = " + str(sigmoid(9.2)))

dim = 2
w, b = initialize_with_zeros(dim)
print("w = " + str(w))
print("b = " + str(b))


def propagate(w, b, X, Y):
    """
    Implements the cost function and its gradient for logistic regression.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (0 if non-cat, 1 if cat) of size (1, number of examples)

    Returns:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, same shape as w
    db -- gradient of the loss with respect to b, same shape as b
    """
    m = X.shape[1]

    # Forward propagation
    A = sigmoid(np.dot(w.T, X) + b)
    cost = (-1 / m) * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))  # cost over the whole training set

    # Backward propagation
    dw = 1 / m * np.dot(X, (A - Y).T)
    db = 1 / m * np.sum(A - Y)

    assert dw.shape == w.shape
    assert db.dtype == float
    cost = np.squeeze(cost)
    assert cost.shape == ()

    grads = {'dw': dw, 'db': db}
    return grads, cost


w, b, X, Y = np.array([[1], [2]]), 2, np.array([[1, 2], [3, 4]]), np.array([[1, 0]])
grads, cost = propagate(w, b, X, Y)
print("dw = " + str(grads["dw"]))
print("db = " + str(grads["db"]))
print("cost = " + str(cost))
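For reference, with $A = \sigma(w^T X + b)$ computed column-wise over the $m$ examples, propagate implements the cross-entropy cost and its gradients:

$$J(w,b) = -\frac{1}{m}\sum_{i=1}^{m}\Big[y^{(i)}\log a^{(i)} + \big(1-y^{(i)}\big)\log\big(1-a^{(i)}\big)\Big]$$

$$\frac{\partial J}{\partial w} = \frac{1}{m}\,X\,(A-Y)^T, \qquad \frac{\partial J}{\partial b} = \frac{1}{m}\sum_{i=1}^{m}\big(a^{(i)}-y^{(i)}\big)$$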
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """
    Optimizes w and b by running a gradient descent algorithm.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of all the costs computed during the optimization; used to plot the learning curve

    Tips:
    You basically need to write down two steps and iterate through them:
    1) Calculate the cost and the gradient for the current parameters. Use propagate().
    2) Update the parameters using the gradient descent rule for w and b.
    """
    costs = []
    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads['dw']
        db = grads['db']

        # Gradient descent update
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record the cost every 100 iterations
        if i % 100 == 0:
            costs.append(cost)
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))

    params = {'w': w, 'b': b}
    grads = {'dw': dw, 'db': db}
    return params, grads, costs


params, grads, costs = optimize(w, b, X, Y, num_iterations=100, learning_rate=0.009, print_cost=False)
print("w = " + str(params["w"]))
print("b = " + str(params["b"]))
print("dw = " + str(grads["dw"]))
print("db = " + str(grads["db"]))


def predict(w, b, X):
    """
    Predicts whether the label is 0 or 1 using learned logistic regression parameters (w, b).

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    """
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    # Compute vector "A" predicting the probability of a cat being present in the picture
    A = sigmoid(np.dot(w.T, X) + b)

    for i in range(A.shape[1]):
        # Convert probabilities A[0, i] to actual predictions Y_prediction[0, i]
        if A[0, i] > 0.5:
            Y_prediction[0, i] = 1
        else:
            Y_prediction[0, i] = 0

    assert Y_prediction.shape == (1, m)
    return Y_prediction


print("predictions = " + str(predict(w, b, X)))


def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Builds the logistic regression model by calling the functions implemented above.

    Arguments:
    X_train -- training set, a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels, a numpy array (vector) of shape (1, m_train)
    X_test -- test set, a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels, a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter: number of iterations to optimize the parameters
    learning_rate -- hyperparameter: learning rate used in the update rule of optimize()
    print_cost -- set to True to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model
    """
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters['w']
    b = parameters['b']

    # Predict on the train/test set examples
    Y_predict_train = predict(w, b, X_train)
    Y_predict_test = predict(w, b, X_test)

    # Print train/test accuracy
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_predict_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_predict_test - Y_test)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_predict_test,
         "Y_prediction_train": Y_predict_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}
    return d
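A note on the accuracy lines in model(): since both the predictions and the labels are 0/1 vectors, np.abs(Y_predict - Y) equals 1 exactly on the misclassified examples, so the printed value is

$$\text{accuracy} = 100\left(1 - \frac{1}{m}\sum_{i=1}^{m}\big|\hat{y}^{(i)} - y^{(i)}\big|\right)\%$$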
costs,"Y_prediction_test": Y_predict_test,"Y_prediction_train" : Y_predict_train,"w" : w,"b" : b,"learning_rate" : learning_rate,"num_iterations": num_iterations}return dd = model(train_set_x,train_set_y,test_set_x,test_set_y,num_iterations=2000,learning_rate=0.005,print_cost=True)# Plot learning curve (with costs) costs = np.squeeze(d['costs']) plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (per hundreds)') plt.title("Learning rate =" + str(d["learning_rate"])) plt.show()進一步分析
Reminder: for gradient descent to work well, you must choose the learning rate wisely. The learning rate $\alpha$ determines how quickly we update the parameters. If it is too large, we may "overshoot" the optimal value; similarly, if it is too small, we will need too many iterations to converge to the optimum. That is why a well-tuned learning rate is crucial.
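Concretely, each iteration of optimize() applies the gradient descent update with learning rate $\alpha$:

$$w := w - \alpha\,\frac{\partial J}{\partial w}, \qquad b := b - \alpha\,\frac{\partial J}{\partial b}$$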
Let's compare the learning curve of our model for several choices of learning rate. Run the cell below; it takes about one minute. Also try values other than the three we initialize the learning_rates variable with, and see what happens.
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
    print("learning rate is: " + str(i))
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations=1500, learning_rate=i, print_cost=False)
    print('\n' + "-------------------------------------------------------" + '\n')

for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]), label=str(models[str(i)]["learning_rate"]))

plt.ylabel('cost')
plt.xlabel('iterations')

legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
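As a final usage sketch (not in the original post), you can feed your own picture to the trained classifier d returned by model(). This assumes Pillow is installed; the file name my_image.jpg is hypothetical:

from PIL import Image  # assumes Pillow is available

fname = "my_image.jpg"  # hypothetical file -- replace with your own picture

# Load, resize to the training resolution, flatten into a column, and standardize
image = Image.open(fname).convert("RGB").resize((num_px, num_px))
x = np.asarray(image).reshape((1, num_px * num_px * 3)).T / 255

my_prediction = predict(d["w"], d["b"], x)
print("y = " + str(int(np.squeeze(my_prediction))) + " (1 = cat, 0 = non-cat)")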
Summary

That completes the first programming assignment: a logistic-regression image classifier built from sigmoid(), initialize_with_zeros(), propagate(), optimize(), and predict(), assembled by model(), with the learning rate as the key hyperparameter to tune.