0% found this document useful (0 votes)
2 views10 pages

Cnn

The document describes a convolutional layer class in Python, including methods for forward and backward propagation. It initializes weights and biases, performs convolution operations, and computes gradients for backpropagation. Additionally, it outlines data preparation and model training processes using a CNN model with visualization of results.

Uploaded by

spyhackboy
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
2 views10 pages

Cnn

The document describes a convolutional layer class in Python, including methods for forward and backward propagation. It initializes weights and biases, performs convolution operations, and computes gradients for backpropagation. Additionally, it outlines data preparation and model training processes using a CNN model with visualization of results.

Uploaded by

spyhackboy
Copyright
© All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 10

class ConvLayer:
    """A 2D convolution layer over NCHW inputs, implemented with explicit
    Python loops (for teaching purposes, not speed).

    Weights are Xavier/Glorot-uniform initialized; biases start at zero.
    `forward` caches its input so `backward` can compute dW, dB and dx.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        self.Cin = in_channels
        self.Cout = out_channels
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride

        # Xavier/Glorot uniform init: limit = sqrt(6 / (fan_in + fan_out)).
        f_in = in_channels * kernel_size * kernel_size
        f_out = out_channels * kernel_size * kernel_size
        limit = np.sqrt(6.0 / (f_in + f_out))

        w_shape = (self.Cout, self.Cin, kernel_size, kernel_size)
        self.W = np.random.uniform(-limit, limit, w_shape)  # (OC, IC, K, K)
        self.B = np.zeros((self.Cout, 1))                   # (OC, 1)

        self.inp_cache = None  # input saved by forward() for use in backward()
        self.dW = None         # gradient wrt W, filled by backward()
        self.dB = None         # gradient wrt B, filled by backward()

    def out_size(self, dim):
        """Output spatial extent for an input dimension of size `dim`."""
        return (dim + 2 * self.padding - self.kernel_size) // self.stride + 1

    def pad(self, x):
        """Zero-pad the H and W axes of an (N, C, H, W) array."""
        p = self.padding
        if p == 0:
            return x
        return np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode="constant")

    def unpad(self, xp):
        """Strip the zero padding added by pad() (inverse operation)."""
        p = self.padding
        if p == 0:
            return xp
        return xp[:, :, p:-p, p:-p]

    def forward(self, x):
        """Convolve x (N, Cin, H, W) with self.W, returning (N, Cout, oh, ow)."""
        N, C, H, W = x.shape

        xp = self.pad(x)
        oh, ow = self.out_size(H), self.out_size(W)
        out = np.zeros((N, self.Cout, oh, ow))
        self.inp_cache = x  # needed by backward()

        for b in range(N):
            for oc in range(self.Cout):
                for i in range(oh):
                    for j in range(ow):
                        h_start = i * self.stride
                        w_start = j * self.stride
                        h_end = h_start + self.kernel_size
                        w_end = w_start + self.kernel_size
                        region = xp[b, :, h_start:h_end, w_start:w_end]  # (IC, K, K)
                        out[b, oc, i, j] = np.sum(region * self.W[oc]) + self.B[oc].item()
        return out

    def backward(self, dout):
        """Given dL/dout (N, Cout, oh, ow), fill self.dW / self.dB and
        return dL/dx with the same shape as the cached forward input.
        """
        X = self.inp_cache
        N, C, H, W = X.shape
        xp = self.pad(X)

        _, _, oh, ow = dout.shape

        self.dW = np.zeros_like(self.W)
        self.dB = np.zeros_like(self.B)
        dxp = np.zeros_like(xp)

        for b in range(N):
            for oc in range(self.Cout):
                for i in range(oh):
                    for j in range(ow):
                        h_start = i * self.stride
                        w_start = j * self.stride
                        h_end = h_start + self.kernel_size
                        w_end = w_start + self.kernel_size

                        # gradient wrt the weights self.W
                        region = xp[b, :, h_start:h_end, w_start:w_end]  # (IC, K, K)
                        self.dW[oc] += dout[b, oc, i, j] * region

                        # gradient wrt the bias term self.B
                        self.dB[oc] += dout[b, oc, i, j]

                        # gradient wrt the padded input xp.
                        # BUGFIX: source was truncated ("* self") and used "=",
                        # which would overwrite contributions from overlapping
                        # windows; gradients must accumulate with "+=".
                        dxp[b, :, h_start:h_end, w_start:w_end] += dout[b, oc, i, j] * self.W[oc]

        dx = self.unpad(dxp)
        return dx
# --- Data preparation, training and evaluation script ---
# NOTE(review): df, one_hot, CNN, train, visualize_results, evaluate and plot
# are defined elsewhere in the notebook/file and are not visible here.

# Take the first 4500 rows: column 0 is the label, the remaining columns are
# pixel values (presumably MNIST-style 28x28 grayscale — TODO confirm).
x = df.iloc[:4500, 1:]
y = df.iloc[:4500, 0]
x = x.values.reshape(-1, 1, 28, 28)  # (N, C=1, H=28, W=28)
y = one_hot(y, 10)                   # one-hot encode the 10 class labels

x.shape, y.shape  # notebook-style shape inspection (no effect in a script)

# Train on the first 3000 samples; tl/va are presumably the training-loss and
# validation-accuracy histories returned by train() — verify against train().
model = CNN()
model, tl, va = train(model, x[:3000], y[:3000], lr=0.001, epochs=25)
visualize_results(tl, va)

# Held-out evaluation on samples 3000..3999.
evaluate(model, x[3000:4000], y[3000:4000])

# Plot a spread of predictions from the remaining samples.
# BUGFIX: the loop body lost its indentation in the source scrape, which
# would raise an IndentationError; restored here.
for i in range(4000, 4500, 40):
    plot(i)

You might also like