[Deep Learning Basics] Cat-Face Recognition with Gradient Descent

MEGALOVANIA · posted 2020-03-25 0:03:57 · updated 2020-04-10 3:16:27

As a trembling noob, I've decided to pad my blog with a basics post.

Training the Model

1. Activation Function

s = \frac{1}{1 + e^{-z}}

import numpy as np

def sigmoid(z):
    s = 1 / (1 + np.exp(-z))   # elementwise logistic function
    return s
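
A quick sanity check (my own, not in the original post): the function should give 0.5 at z = 0 and work elementwise on arrays.

print(sigmoid(0))                     # 0.5
print(sigmoid(np.array([-2., 2.])))   # [0.11920292 0.88079708]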

2. Initialization

def initialize_with_zeros(dim):
    w = np.zeros((dim, 1))              # weight vector, one weight per input feature
    b = 0                               # bias
    assert(w.shape == (dim, 1))         # sanity-check the shape
    assert(isinstance(b, float) or isinstance(b, int))
    return w, b
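
For instance (my own illustration):

w, b = initialize_with_zeros(2)
print(w)   # [[0.]
           #  [0.]]
print(b)   # 0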

3. Computing the Gradients

def propagate(w, b, X, Y):
    m = X.shape[1]                   # number of examples
    A = sigmoid(np.dot(w.T, X) + b)  # forward pass: activations, shape (1, m)
    cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A), axis = 1, keepdims = True)  # cross-entropy cost
    dw = 1 / m * np.dot(X, (A - Y).T)                       # gradient of the cost w.r.t. w
    db = 1 / m * np.sum(A - Y, axis = 1, keepdims = True)   # gradient of the cost w.r.t. b
    assert(dw.shape == w.shape)   # sanity-check the shape
    assert(db.dtype == float)     # sanity-check the dtype
    cost = np.squeeze(cost)       # drop singleton dimensions so cost is a scalar
    assert(cost.shape == ())      # sanity-check the shape
    grads = {"dw": dw,
             "db": db}            # pack the gradients
    return grads, cost
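
A toy forward/backward pass to sanity-check shapes and values; these inputs and the rough outputs are my own example, not from the original post:

w = np.array([[1.], [2.]])
b = 2.
X = np.array([[1., 2., -1.], [3., 4., -3.2]])
Y = np.array([[1, 0, 1]])
grads, cost = propagate(w, b, X, Y)
print(grads["dw"])   # roughly [[0.9985], [2.3951]]
print(grads["db"])   # roughly [[0.0015]]
print(cost)          # roughly 5.8015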

4. The Optimization Loop

def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    costs = []
    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)   # forward/backward pass
        dw = grads["dw"]
        db = grads["db"]
        w = w - learning_rate * dw   # gradient-descent update for w
        b = b - learning_rate * db   # gradient-descent update for b
        if i % 100 == 0:
            costs.append(cost)       # record the cost every 100 iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
    params = {"w": w,
              "b": b}
    grads = {"dw": dw,      # pack the final gradients
             "db": db}
    return params, grads, costs
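
Continuing the toy example above (again my own illustration, with an arbitrary small learning rate): each recorded cost should be lower than the last.

params, grads, costs = optimize(w, b, X, Y, num_iterations = 500,
                                learning_rate = 0.009, print_cost = False)
print(params["w"], params["b"])   # parameters after 500 gradient steps
print(costs)                      # one entry per 100 iterations, decreasing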

Using the Model

5. Predicting on Images

def predict(w, b, X):
    m = X.shape[1]                   # number of examples
    Y_prediction = np.zeros((1, m))  # initialize the predictions
    w = w.reshape(X.shape[0], 1)     # make sure w has the right shape
    A = sigmoid(np.dot(w.T, X) + b)  # forward pass
    for i in range(A.shape[1]):      # loop over the examples
        if A[0, i] <= 0.5:           # probability at most 50%: predict "not a cat"
            Y_prediction[0, i] = 0
        else:                        # probability above 50%: predict "cat"
            Y_prediction[0, i] = 1
    assert(Y_prediction.shape == (1, m))
    return Y_prediction
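
A quick check with toy inputs of my own: one example lands well above the 0.5 threshold, one well below.

w = np.array([[1.], [1.]])
b = 0
X = np.array([[1., -1.], [1., -1.]])
print(predict(w, b, X))   # [[1. 0.]]: sigmoid(2) > 0.5, sigmoid(-2) < 0.5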

6. The Full Model

def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    w, b = initialize_with_zeros(12288)  # 64 * 64 * 3 = 12288 features per flattened RGB image
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)   # run gradient descent
    w = parameters["w"]
    b = parameters["b"]
    Y_prediction_test = predict(w, b, X_test)   # predictions on the test set
    Y_prediction_train = predict(w, b, X_train) # predictions on the training set
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100)) # training-set accuracy
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100)) # test-set accuracy
    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test, 
         "Y_prediction_train" : Y_prediction_train, 
         "w" : w, 
         "b" : b,
         "learning_rate" : learning_rate,
         "num_iterations": num_iterations}
    return d   # return everything the caller might want to inspect
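
To actually run the model you still need data. A minimal sketch, assuming a hypothetical load_dataset() that returns (m, 64, 64, 3) RGB image arrays with (1, m) labels (the function name and shapes are my assumptions, not from the original post); each image is flattened into a column and scaled to [0, 1]:

train_x_orig, train_y, test_x_orig, test_y = load_dataset()   # hypothetical loader

# flatten each 64x64x3 image into a 12288-dim column, scale pixels to [0, 1]
train_x = train_x_orig.reshape(train_x_orig.shape[0], -1).T / 255.
test_x = test_x_orig.reshape(test_x_orig.shape[0], -1).T / 255.

d = model(train_x, train_y, test_x, test_y,
          num_iterations = 2000, learning_rate = 0.005, print_cost = True)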
