# -*- coding: utf-8 -*-
"""
Created on Wed Jan 31 14:10:03 2018

@author: markli
"""
import numpy as np;

def ReLU(x):
    return max(0,x);

class CovolutionLayer:
    """
    A convolution layer, made up of two steps: convolution and pooling.
    """
    def __init__(self,filters,padding,pooling,action_fun=ReLU):
        """
        filters contains filter_size, filter_num and filter_stride:
        filter_size   size of the filter (convolution kernel), [rows, cols]
        filter_num    number of filters (convolution kernels)
        filter_stride stride of the convolution
        padding amount of zero padding (0 values) added around the input
        pooling contains pooling_size, pooling_stride and pooling_classic:
        pooling_size    size of the pooling window, [rows, cols]; the window is square
        pooling_stride  stride of the pooling, usually equal to pooling_size
        pooling_classic pooling type: max pooling ("max") or average pooling ("average")
        action_fun activation function applied after the convolution
        """
        self.f_size = (filters[0],filters[1]);
        self.f_num = filters[2];
        self.f_stride = filters[3];
        self.padding = padding;
        self.p_size = (pooling[0],pooling[1]);
        self.p_stride = pooling[2];
        self.p_classic = pooling[3];
        self.action_fun = action_fun;

        #one randomly initialized kernel per filter
        self.weights = [];
        for i in range(self.f_num):
            weight = np.random.randn(self.f_size[0],self.f_size[1]);
            self.weights.append(weight);

        #one bias per filter
        self.biases = np.random.randn(self.f_num);

    def Convolution(self,X):
        """
        X is a 2-D array.
        """
        #shape of the original X
        n,m = X.shape;
        #zero padding
        if(self.padding != 0):
            padded = np.zeros((n+2*self.padding,m+2*self.padding));
            padded[self.padding:self.padding+n,self.padding:self.padding+m] = X;
            X = padded;

        #shape of X after padding
        n,m = X.shape;

        #size of the feature map produced by the convolution
        t = int((n-self.f_size[0]) / self.f_stride) + 1; #number of rows
        l = int((m-self.f_size[1]) / self.f_stride) + 1; #number of columns

        #size of the feature map after pooling
        t_p = int((t-self.p_size[0]) / self.p_stride) + 1; #rows after pooling
        l_p = int((l-self.p_size[1]) / self.p_stride) + 1; #columns after pooling
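        #e.g. for the 6x6 test input below (padding=1, 3x3 filter, stride 1,
        #2x2 pooling with stride 2): the padded input is 8x8, so
        #t = l = (8-3)/1 + 1 = 6 and t_p = l_p = (6-2)/2 + 1 = 3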

        self.convs = [];
        self.pools = [];
        for k in range(self.f_num):
            conv = np.ones((t,l));
            pool = np.ones((t_p,l_p));
            row = 0;
            #convolution: slide the k-th kernel over X
            for i in range(t):
                col = 0;
                for j in range(l):
                    temp = X[row:row+self.f_size[0],col:col+self.f_size[1]];
                    z = np.sum(np.multiply(self.weights[k],temp)) + self.biases[k];
                    a = self.action_fun(z);
                    conv[i][j] = a;
                    col = col + self.f_stride;

                row = row + self.f_stride;

            self.convs.append(conv);

            #pooling
            row = 0;
            for i in range(t_p):
                col = 0;
                for j in range(l_p):
                    temp = conv[row:row+self.p_size[0],col:col+self.p_size[1]];
                    if(self.p_classic == "average"):
                        pool[i][j] = np.sum(temp) / (self.p_size[0] * self.p_size[1]);
                    else:
                        pool[i][j] = np.max(temp);

                    col = col + self.p_stride;

                row = row + self.p_stride;

            self.pools.append(pool);

X = np.array([[18,54,51,239,244,188],
              [55,121,75,78,95,88],
              [35,24,104,113,109,221],
              [3,154,104,235,25,130],
              [15,253,225,159,78,233],
              [68,85,180,214,215,0]]);

#normalize X: subtract the mean (36 elements) and divide by the maximum value
X = (X - np.sum(X)/36) / np.max(X);
#print(X.shape)

con = CovolutionLayer([3,3,2,1],1,[2,2,2,"max"]);
con.Convolution(X);
print(con.pools);
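
#for comparison, the same layer with average pooling instead of max pooling;
#with these parameters each pooled feature map again comes out as 3x3
con_avg = CovolutionLayer([3,3,2,1],1,[2,2,2,"average"]);
con_avg.Convolution(X);
print([p.shape for p in con_avg.pools]);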

For now this only implements the 2-D case; I have not yet figured out how to store things for the 3-D case. The storage and computation in a convolutional neural network really are complicated, so I will implement that once I have thought it through.
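
One possible way to handle the 3-D case (only a rough sketch, not the implementation above: it assumes a channel-first layout, with the input stored as a (channels, height, width) array and all kernels in a single (num_filters, channels, filter_rows, filter_cols) array) is to sum the element-wise products over every channel at each window position:

import numpy as np

def convolve_multichannel(X, weights, biases, stride=1):
    #X: input volume of shape (channels, height, width)
    #weights: kernels of shape (num_filters, channels, filter_rows, filter_cols)
    #biases: vector of shape (num_filters,)
    num_filters, channels, f_rows, f_cols = weights.shape
    _, h, w = X.shape
    out_h = (h - f_rows) // stride + 1
    out_w = (w - f_cols) // stride + 1
    out = np.zeros((num_filters, out_h, out_w))
    for k in range(num_filters):
        for i in range(out_h):
            for j in range(out_w):
                #take the (channels, f_rows, f_cols) patch under the window
                patch = X[:, i*stride:i*stride+f_rows, j*stride:j*stride+f_cols]
                #sum the element-wise products over all channels, then add the bias
                out[k, i, j] = np.sum(patch * weights[k]) + biases[k]
    return out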
