# Source: ulvis.paste.net — paste titled "full convolution" (recent-pastes listing)
import random
from math import exp
3.
def full_convolution(input, filter):
    """Full cross-correlation of `input` with `filter`, with zero padding.

    Returns an (n + fx - 1) x (m + fy - 1) grid; positions of the filter
    that fall outside the input contribute zero. Layer.backpropagate flips
    the filter before calling, which turns this into the "full convolution"
    that routes output errors back through a valid convolution layer.

    Bug fixed: the original allocated (n + fx) x (m + fy) and offset the
    input index by filterx/filtery instead of filterx-1/filtery-1, which
    left row/column 0 permanently zero and shifted the whole result by one
    cell — misaligning the gradient after the caller's n x m crop.
    """
    n = len(input)
    m = len(input[0])
    filterx = len(filter)
    filtery = len(filter[0])
    result = [[0 for _ in range(m + filtery - 1)] for _ in range(n + filterx - 1)]
    for i in range(n + filterx - 1):
        for i2 in range(m + filtery - 1):
            for x in range(i, i + filterx):
                for y in range(i2, i2 + filtery):
                    # Map padded coordinates back onto the input and skip
                    # anything outside it (implicit zero padding).
                    xi = x - (filterx - 1)
                    yi = y - (filtery - 1)
                    if 0 <= xi < n and 0 <= yi < m:
                        result[i][i2] += input[xi][yi] * filter[x - i][y - i2]
    return result
17.
def convolution(input, filter):
    """Valid cross-correlation: slide `filter` over `input` with no padding.

    Output is (n - fx + 1) x (m - fy + 1); cell (i, j) is the elementwise
    product of the filter with the input window anchored at (i, j), summed.
    """
    rows = len(input)
    cols = len(input[0])
    frows = len(filter)
    fcols = len(filter[0])
    return [
        [
            sum(
                input[i + a][j + b] * filter[a][b]
                for a in range(frows)
                for b in range(fcols)
            )
            for j in range(cols - fcols + 1)
        ]
        for i in range(rows - frows + 1)
    ]
30.
def add(mat1, mat2):
    """Elementwise sum of two matrices, sized by `mat1`.

    `mat2` may be larger than `mat1`; only its top-left
    len(mat1) x len(mat1[0]) region is read.

    Bug fixed: the paste dropped the `def` line (its line 31), leaving a
    bare return statement; the header is reconstructed here from the
    two-argument call sites in Layer.
    """
    return [[mat1[x][y] + mat2[x][y] for y in range(len(mat1[0]))] for x in range(len(mat1))]
33.
def flip(mat):
    """Rotate a matrix 180 degrees (reverse row order and each row)."""
    return [list(reversed(row)) for row in reversed(mat)]
36.
class Layer:
    """Convolutional layer: `filtercount` kernels, each spanning all input
    layers with a (filterx x filtery) window, applied as a valid
    cross-correlation over an n x m input per layer."""

    def __init__(self, n, m, layers, filterx, filtery, filtercount):
        self.n = n
        self.m = m
        self.layers = layers
        self.filterx = filterx
        self.filtery = filtery
        self.filtercount = filtercount
        # One (layers x filterx x filtery) kernel per output feature map,
        # initialised uniformly in [-1, 1].
        self.filters = [
            [
                [[random.uniform(-1, 1) for _ in range(filtery)] for _ in range(filterx)]
                for _ in range(layers)
            ]
            for _ in range(filtercount)
        ]

    def compute(self, inputs):
        """Forward pass: each output map sums, over input layers, the valid
        cross-correlation with the matching kernel slice. Caches inputs and
        pre-summed outputs for backpropagation."""
        self.inputs = inputs
        out_h = self.n - self.filterx + 1
        out_w = self.m - self.filtery + 1
        self.result = [
            [[0 for _ in range(out_w)] for _ in range(out_h)]
            for _ in range(self.filtercount)
        ]
        for fil, kernel in enumerate(self.filters):
            for lay in range(self.layers):
                self.result[fil] = add(self.result[fil], convolution(inputs[lay], kernel[lay]))
        return self.result

    def backpropagate(self, errors):
        """Given d(loss)/d(output) maps, return d(loss)/d(input) maps and
        stash the per-kernel gradients for `learning`."""
        self.filtererror = [
            [
                [[0 for _ in range(self.filtery)] for _ in range(self.filterx)]
                for _ in range(self.layers)
            ]
            for _ in range(self.filtercount)
        ]
        inputerror = [
            [[0 for _ in range(self.m)] for _ in range(self.n)]
            for _ in range(self.layers)
        ]
        for fil in range(self.filtercount):
            for lay in range(self.layers):
                # Error flows back through a full convolution with the
                # 180-degree-rotated kernel...
                inputerror[lay] = add(inputerror[lay], full_convolution(errors[fil], flip(self.filters[fil][lay])))
                # ...while the kernel gradient is the valid correlation of
                # the cached input with the output error.
                self.filtererror[fil][lay] = convolution(self.inputs[lay], errors[fil])
        return inputerror

    def cleanup(self):
        """Drop the tensors cached during the forward/backward pass."""
        del self.filtererror
        del self.inputs
        del self.result

    def learning(self, d):
        """Step each kernel weight by d * its gradient (the script defines
        error as target - output, so `+=` descends the squared error)."""
        for fil in range(self.filtercount):
            for lay in range(self.layers):
                for i, grad_row in enumerate(self.filtererror[fil][lay]):
                    for i2, g in enumerate(grad_row):
                        self.filters[fil][lay][i][i2] += d * g
        self.cleanup()
78.
class Maxpool:
    """Non-overlapping max pooling over poolingx x poolingy windows.

    Bugs fixed versus the pasted original:
      * `self.n / self.poolingx` is float division under Python 3 and
        crashes `range()`; floor division is used instead.
      * the copy of each pooled maximum into the returned grid sat outside
        the y-loop, so only the last window column of each row was written;
        it now runs once per pooling window.
      * the returned grid was seeded with (0, 0, 0) tuples, leaking tuples
        into every unpooled cell; it now starts as numeric zeros.
    """

    def __init__(self, n, m, layers, poolingx, poolingy):
        self.n = n
        self.m = m
        self.layers = layers
        self.poolingx = poolingx
        self.poolingy = poolingy

    def compute(self, inputs):
        """Return an n x m grid per layer whose top-left
        (n // poolingx) x (m // poolingy) region holds the window maxima;
        the argmax positions are cached for backpropagation."""
        # (value, i, i2) triples so max() also tracks the winning position.
        self.result = [
            [[(float("-inf"), 0, 0) for _ in range(self.m)] for _ in range(self.n)]
            for _ in range(self.layers)
        ]
        result = [
            [[0 for _ in range(self.m)] for _ in range(self.n)]
            for _ in range(self.layers)
        ]
        for lay in range(self.layers):
            for x in range(self.n // self.poolingx):
                for y in range(self.m // self.poolingy):
                    for i in range(x * self.poolingx, (x + 1) * self.poolingx):
                        for i2 in range(y * self.poolingy, (y + 1) * self.poolingy):
                            self.result[lay][x][y] = max(self.result[lay][x][y], (inputs[lay][i][i2], i, i2))
                    # Copy this window's maximum (was outside the y-loop).
                    result[lay][x][y] = self.result[lay][x][y][0]
        return result

    def backpropagate(self, errors):
        """Route each pooled cell's error to the input position that won
        the max in the forward pass; every other position stays zero."""
        result = [
            [[0 for _ in range(self.m)] for _ in range(self.n)]
            for _ in range(self.layers)
        ]
        for lay in range(self.layers):
            for x in range(self.n // self.poolingx):
                for y in range(self.m // self.poolingy):
                    _, src_i, src_i2 = self.result[lay][x][y]
                    result[lay][src_i][src_i2] = errors[lay][x][y]
        return result

    def cleanup(self):
        """Forget the cached argmax grid."""
        del self.result

    def learning(self, d):
        # Pooling has no weights; a learning step just clears the cache.
        self.cleanup()
112.
class ReLu:
    """Elementwise activation layer over (layers x n x m) grids: applies
    `f` forward and scales incoming errors by `fp` (f's derivative) going
    backward. Despite the name, any activation/derivative pair works."""

    def __init__(self, n, m, layers, f, fp):
        self.n = n
        self.m = m
        self.layers = layers
        self.f = f
        self.fp = fp

    def compute(self, inputs):
        """Apply the activation to every cell; cache inputs for backprop."""
        self.inputs = inputs
        activate = self.f
        return [
            [
                [activate(inputs[lay][x][y]) for y in range(self.m)]
                for x in range(self.n)
            ]
            for lay in range(self.layers)
        ]

    def backpropagate(self, errors):
        """Chain rule, elementwise: error * f'(cached input)."""
        derivative = self.fp
        return [
            [
                [
                    errors[lay][x][y] * derivative(self.inputs[lay][x][y])
                    for y in range(self.m)
                ]
                for x in range(self.n)
            ]
            for lay in range(self.layers)
        ]

    def cleanup(self):
        """Forget the cached forward inputs."""
        del self.inputs

    def learning(self, d):
        # No trainable parameters; just release the cache.
        self.cleanup()
143.
144.
class FullyConnected:
    """Dense output layer: flattens a (layers x n x m) input into `hidden`
    activated units, returned in the 1 x 1 x hidden grid layout the other
    layers use.

    Bug fixed: compute() called the module-level `f` instead of the
    activation stored on the instance (`self.f`), silently ignoring the
    `f` constructor argument.
    """

    def __init__(self, n, m, layers, hidden, f, fp):
        self.n = n
        self.m = m
        self.layers = layers
        self.hidden = hidden
        # One full (layers x n x m) weight volume per hidden unit.
        self.weights = [
            [
                [[random.uniform(-1, 1) for _ in range(self.m)] for _ in range(self.n)]
                for _ in range(self.layers)
            ]
            for _ in range(self.hidden)
        ]
        self.bias = [random.uniform(-1, 1) for _ in range(self.hidden)]
        self.f = f    # activation
        self.fp = fp  # activation derivative (applied to pre-activations)

    def compute(self, inputs):
        """Forward pass.

        Caches the pre-activation sums in self.result (shape [1][1][hidden])
        and returns the activated values in the same layout.
        """
        self.inputs = inputs
        self.result = [[[self.bias[k] for k in range(self.hidden)] for _ in range(1)] for _ in range(1)]
        result = [[[0 for _ in range(self.hidden)] for _ in range(1)] for _ in range(1)]
        for i in range(self.hidden):
            for lay in range(self.layers):
                for x in range(self.n):
                    for y in range(self.m):
                        self.result[0][0][i] += inputs[lay][x][y] * self.weights[i][lay][x][y]
            # Was a bare `f(...)` in the original, bypassing self.f.
            result[0][0][i] = self.f(self.result[0][0][i])
        return result

    def backpropagate(self, errors):
        """Return d(loss)/d(input) as a (layers x n x m) grid; caches the
        output errors for the weight update in `learning`."""
        self.errors = errors
        result = [
            [[0 for _ in range(self.m)] for _ in range(self.n)]
            for _ in range(self.layers)
        ]
        for i in range(self.hidden):
            for lay in range(self.layers):
                for x in range(self.n):
                    for y in range(self.m):
                        result[lay][x][y] += self.fp(self.result[0][0][i]) * errors[0][0][i] * self.weights[i][lay][x][y]
        return result

    def cleanup(self):
        """Drop the tensors cached during the forward/backward pass."""
        del self.inputs
        del self.errors
        del self.result

    def learning(self, d):
        """Gradient step on weights and biases using the cached forward
        inputs, pre-activation sums and output errors."""
        for i in range(self.hidden):
            for lay in range(self.layers):
                for x in range(self.n):
                    for y in range(self.m):
                        self.weights[i][lay][x][y] += d * self.inputs[lay][x][y] * self.errors[0][0][i] * self.fp(self.result[0][0][i])
            self.bias[i] += d * self.fp(self.result[0][0][i]) * self.errors[0][0][i]
        self.cleanup()
193.
def f(x):
    """Logistic sigmoid: maps any real x into (0, 1)."""
    return 1.0 / (1.0 + exp(-x))
196.
def fp(x):
    """Derivative of the sigmoid `f`: f(x) * (1 - f(x))."""
    s = f(x)
    return s * (1 - s)
199.
# Training script: learn to separate the all-zero 3x3 grid (target 1) from
# random binary 3x3 grids (target 0) using a 1x1 conv layer + dense layer.
random.seed()
fl = Layer(3, 3, 1, 1, 1, 1)
fl2 = FullyConnected(3, 3, 1, 1, f, fp)
testcase = [[[[0 for k in range(3)] for j in range(3)] for i in range(1)] for q in range(512)]
output = [[[[0 for k in range(1)] for j in range(1)] for i in range(1)] for q in range(512)]
# Sample 0 stays all-zero with target 1; samples 1..9 are random binary
# grids with target 0.
for i in range(1, 10):
    testcase[i][0] = [[random.randint(0, 1) for k in range(3)] for j in range(3)]
    output[i][0][0][0] = 0
output[0][0][0][0] = 1
for i in range(30000):
    # randint's upper bound is inclusive; the original used (0, 10) and so
    # also trained on uninitialised sample 10 (all zeros, target 0), which
    # contradicts sample 0 (all zeros, target 1).
    x = random.randint(0, 9)
    out = fl.compute(testcase[x])
    out = fl2.compute(out)
    # error = target - prediction, so learning() can apply `weight += d * grad`.
    error = [[[(output[x][0][0][0] - out[0][0][0]) for k in range(1)] for j in range(1)] for i in range(1)]
    back = fl2.backpropagate(error)
    back = fl.backpropagate(back)
    tau = 0.3  # learning rate
    fl.learning(tau)
    fl2.learning(tau)
    if i % 1000 == 0:
        print(back)
print(fl.filters)
print(fl2.weights)
for i in range(10):
    print(testcase[i][0])
    print(fl2.compute(fl.compute(testcase[i]))[0][0][0])
# (paste-site footer) Parsed in 0.061 seconds