• Implementing a Neural Network Model from Scratch in Python


    Reposted from: https://www.jianshu.com/p/4b30e1dd2252
    
    
    common_funcs.py

    import numpy as np
    import matplotlib.pyplot as plt


    def sigmoid(x):
        return 1 / (1 + np.exp(-x))


    def softmax(a):
        # shift by the max for numerical stability; for batched (2-D) input,
        # normalize each row independently so every sample gets its own distribution
        if a.ndim == 2:
            a = a - np.max(a, axis=1, keepdims=True)
            exp_a = np.exp(a)
            return exp_a / np.sum(exp_a, axis=1, keepdims=True)
        a = a - np.max(a)
        exp_a = np.exp(a)
        return exp_a / np.sum(exp_a)


    def cross_entropy_error(y, t):
        if y.ndim == 1:
            t = t.reshape(1, t.size)
            y = y.reshape(1, y.size)
        # if the labels are one-hot, convert them to class indices
        if t.size == y.size:
            t = t.argmax(axis=1)
        batch_size = y.shape[0]
        return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size


    def numerical_gradient(f, x):
        # central-difference gradient of f with respect to every element of x
        h = 1e-4
        grad = np.zeros_like(x)
        it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            tmp_val = x[idx]
            x[idx] = float(tmp_val) + h
            fxh1 = f(x)
            x[idx] = tmp_val - h
            fxh2 = f(x)
            grad[idx] = (fxh1 - fxh2) / (2 * h)
            x[idx] = tmp_val
            it.iternext()
        return grad


    def function_2(x):
        return np.sum(x ** 2)
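
    A quick sanity check of these helpers (my own sketch, not part of the original post): it evaluates sigmoid and softmax on small arrays and verifies numerical_gradient on the toy function function_2, whose gradient at (3, 4) is (6, 8).

    # check_common_funcs.py (hypothetical helper script)
    import numpy as np
    from common_funcs import sigmoid, softmax, numerical_gradient, function_2

    print(sigmoid(np.array([-1.0, 0.0, 1.0])))     # values squashed into (0, 1)
    print(softmax(np.array([0.3, 2.9, 4.0])))      # probabilities summing to 1
    print(numerical_gradient(function_2, np.array([3.0, 4.0])))  # ~[6. 8.]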
    TwoLayerNet.py
    import numpy as np
    from common_funcs import sigmoid, softmax, cross_entropy_error, numerical_gradient


    class TwoLayerNet:
        def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
            self.params = {
                'w1': weight_init_std * np.random.randn(input_size, hidden_size),
                'b1': np.zeros(hidden_size),
                'w2': weight_init_std * np.random.randn(hidden_size, output_size),
                'b2': np.zeros(output_size)
            }

        def predict(self, x):
            w1, w2 = self.params['w1'], self.params['w2']
            b1, b2 = self.params['b1'], self.params['b2']
            # layer 1 (note the order of x and w1 in the dot product)
            a1 = np.dot(x, w1) + b1
            z1 = sigmoid(a1)
            # layer 2
            a2 = np.dot(z1, w2) + b2
            y = softmax(a2)
            return y

        def loss(self, x, t):
            y = self.predict(x)
            return cross_entropy_error(y, t)

        def accuracy(self, x, t):
            y = self.predict(x)
            y = np.argmax(y, axis=1)
            t = np.argmax(t, axis=1)
            accuracy = np.sum(y == t) / float(x.shape[0])
            return accuracy

        def gradient(self, x, t):
            # numerical gradient of the loss with respect to each parameter
            loss_w = lambda w: self.loss(x, t)
            grads = {}
            grads['w1'] = numerical_gradient(loss_w, self.params['w1'])
            grads['b1'] = numerical_gradient(loss_w, self.params['b1'])
            grads['w2'] = numerical_gradient(loss_w, self.params['w2'])
            grads['b2'] = numerical_gradient(loss_w, self.params['b2'])
            return grads
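
    As a quick shape check before training on MNIST, a minimal sketch of my own (not in the original post) that builds the network on random dummy data and confirms that predict returns one probability row per sample:

    import numpy as np
    from TwoLayerNet import TwoLayerNet

    net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
    x = np.random.rand(5, 784)                 # 5 fake flattened 28x28 images
    t = np.eye(10)[np.random.choice(10, 5)]    # 5 random one-hot labels

    y = net.predict(x)
    print(y.shape)          # (5, 10)
    print(y.sum(axis=1))    # each row sums to ~1.0
    print(net.loss(x, t), net.accuracy(x, t))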
    mnist.py
    try:
        import urllib.request
    except ImportError:
        raise ImportError('You should use Python 3.x')
    import os.path
    import gzip
    import pickle
    import os
    import numpy as np

    url_base = 'http://yann.lecun.com/exdb/mnist/'
    key_file = {
        'train_img': 'train-images-idx3-ubyte.gz',
        'train_label': 'train-labels-idx1-ubyte.gz',
        'test_img': 't10k-images-idx3-ubyte.gz',
        'test_label': 't10k-labels-idx1-ubyte.gz'
    }

    dataset_dir = os.path.dirname(os.path.abspath(__file__))
    save_file = dataset_dir + "/mnist.pkl"

    train_num = 60000
    test_num = 10000
    img_dim = (1, 28, 28)
    img_size = 784


    def _download(file_name):
        file_path = dataset_dir + "/" + file_name
        if os.path.exists(file_path):
            return
        print("Downloading " + file_name + " ... ")
        urllib.request.urlretrieve(url_base + file_name, file_path)
        print("Done")


    def download_mnist():
        for v in key_file.values():
            _download(v)


    def _load_label(file_name):
        file_path = dataset_dir + "/" + file_name
        print("Converting " + file_name + " to NumPy Array ...")
        with gzip.open(file_path, 'rb') as f:
            labels = np.frombuffer(f.read(), np.uint8, offset=8)
        print("Done")
        return labels


    def _load_img(file_name):
        file_path = dataset_dir + "/" + file_name
        print("Converting " + file_name + " to NumPy Array ...")
        with gzip.open(file_path, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        data = data.reshape(-1, img_size)
        print("Done")
        return data


    def _convert_numpy():
        dataset = {}
        dataset['train_img'] = _load_img(key_file['train_img'])
        dataset['train_label'] = _load_label(key_file['train_label'])
        dataset['test_img'] = _load_img(key_file['test_img'])
        dataset['test_label'] = _load_label(key_file['test_label'])
        return dataset


    def init_mnist():
        download_mnist()
        dataset = _convert_numpy()
        print("Creating pickle file ...")
        with open(save_file, 'wb') as f:
            pickle.dump(dataset, f, -1)
        print("Done!")


    def _change_one_hot_label(X):
        T = np.zeros((X.size, 10))
        for idx, row in enumerate(T):
            row[X[idx]] = 1
        return T


    def load_mnist(normalize=True, flatten=True, one_hot_label=False):
        """Load the MNIST dataset.

        Parameters
        ----------
        normalize : normalize pixel values to the range 0.0-1.0
        one_hot_label : if True, return labels as one-hot arrays,
            e.g. [0,0,1,0,0,0,0,0,0,0]
        flatten : whether to flatten each image into a 1-D array

        Returns
        -------
        (training images, training labels), (test images, test labels)
        """
        if not os.path.exists(save_file):
            init_mnist()

        with open(save_file, 'rb') as f:
            dataset = pickle.load(f)

        if normalize:
            for key in ('train_img', 'test_img'):
                dataset[key] = dataset[key].astype(np.float32)
                dataset[key] /= 255.0

        if one_hot_label:
            dataset['train_label'] = _change_one_hot_label(dataset['train_label'])
            dataset['test_label'] = _change_one_hot_label(dataset['test_label'])

        if not flatten:
            for key in ('train_img', 'test_img'):
                dataset[key] = dataset[key].reshape(-1, 1, 28, 28)

        return (dataset['train_img'], dataset['train_label']), (dataset['test_img'], dataset['test_label'])
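
    Before running the training script it is worth confirming that the loader works; a minimal sketch (my addition) that loads the dataset and prints the array shapes:

    from mnist import load_mnist

    # downloads MNIST on first use, then reads the cached mnist.pkl
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
    print(x_train.shape, t_train.shape)   # (60000, 784) (60000, 10)
    print(x_test.shape, t_test.shape)     # (10000, 784) (10000, 10)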
    main.py
    import numpy as np
    import matplotlib.pyplot as plt
    from mnist import load_mnist
    from TwoLayerNet import TwoLayerNet

    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

    train_loss_list = []
    train_acc_list = []
    test_acc_list = []

    iter_num = 10000
    train_size = x_train.shape[0]
    batch_size = 100
    learning_rate = 0.1
    iter_per_epoch = max(train_size / batch_size, 1)

    net_work = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

    for i in range(iter_num):
        # sample a random mini-batch
        batch_mask = np.random.choice(train_size, batch_size)
        x_batch = x_train[batch_mask]
        t_batch = t_train[batch_mask]

        # numerical gradient of the loss on this mini-batch
        grad = net_work.gradient(x_batch, t_batch)

        # gradient-descent update of every parameter
        for key in ('w1', 'b1', 'w2', 'b2'):
            net_work.params[key] -= learning_rate * grad[key]

        # record loss and accuracy once per epoch
        if i % iter_per_epoch == 0:
            loss = net_work.loss(x_train, t_train)
            train_loss_list.append(loss)
            train_acc = net_work.accuracy(x_train, t_train)
            train_acc_list.append(train_acc)
            test_acc = net_work.accuracy(x_test, t_test)
            test_acc_list.append(test_acc)
            print('run... loss:{} train acc:{} test acc:{}'.format(loss, train_acc, test_acc))
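
    Because gradient() relies on numerical differentiation, every one of the 10000 iterations is slow, so expect a long run. main.py imports matplotlib but never uses it; an optional follow-up sketch of my own (reusing the lists recorded above) plots the per-epoch accuracy curves after the loop finishes:

    # plot the train/test accuracy recorded once per epoch
    epochs = np.arange(len(train_acc_list))
    plt.plot(epochs, train_acc_list, label='train acc')
    plt.plot(epochs, test_acc_list, linestyle='--', label='test acc')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.ylim(0, 1.0)
    plt.legend()
    plt.show()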
  • Original article: https://www.cnblogs.com/zhibei/p/12070079.html