Fresco
mp3""></audio>
<div>
<button onclick=""document.getElementById('demo').play()"">
Play the Audio </button>
</div>
import numpy as np
from scipy import stats

def measures(arr):
    '''
    Input: arr : numpy array
    Return : mean, median, std_deviation, variance, mode, iqr : float
    Note:
    1. Assign the values to designated variables
    2. Round off to 2 decimal places
    '''
    mean = round(np.mean(arr), 2)
    median = round(np.median(arr), 2)
    std_deviation = round(np.std(arr), 2)
    variance = round(np.var(arr), 2)
    mode = round(float(stats.mode(arr)[0]), 2)
    iqr = round(np.percentile(arr, 75, interpolation='midpoint')
                - np.percentile(arr, 25, interpolation='midpoint'), 2)
    return mean, median, std_deviation, variance, mode, iqr

if __name__ == '__main__':
    array1 = []
    n = int(input())
    for i in range(n):
        array1.append(float(input()))
    narray1 = np.array(array1)
    print(measures(narray1))
import math
# Ways to choose 3 out of 10: C(10, 3) = 10! / (7! * 3!) = 120
red = math.factorial(10) / (math.factorial(7) * math.factorial(3))
# Ways to choose 3 out of 8: C(8, 3) = 8! / (3! * 5!) = 56
blue = math.factorial(8) / (math.factorial(3) * math.factorial(5))
print(red * blue)  # 120 * 56 = 6720.0
from scipy import stats
import numpy as np
import statistics
data ={"A": 90,"B": 86,"C":70,"D":95,"E":95,"F":95,"G":95}
values = list(data.values())
print("Mean")
print(np.mean(values))
print("Median")
print(np.median(values))
print("Mode")
print(statistics.mode(values))
print("Standard Deviation")
print(np.std(values))
print("Variance")
print(np.var(values))
print("range")
print(stats.iqr(values))
Find the probability of rolling an even number or a prime number with a fair die.
prob_getting_even = 3/6
prob_getting_prime = 3/6
prob_even_and_prime = 1/6
# Inclusion-exclusion: P(even or prime) = P(even) + P(prime) - P(even and prime)
prob_even_or_prime = prob_getting_even + prob_getting_prime - prob_even_and_prime
print(prob_even_or_prime)  # 0.8333...
# Drawing a king and drawing a queen are mutually exclusive, so probabilities add.
prob_getting_king = 1/13
prob_getting_queen = 1/13
prob_king_or_queen = prob_getting_king + prob_getting_queen
print(prob_king_or_queen)
Result
0.15384615384615385
Question:
80% of people who purchase pet insurance are women. If 9 pet insurance owners are
randomly selected, find the probability that exactly 6 are women.
Solution:
# Binomial distribution: n = 9 trials, success probability p = 0.80, k = 6 successes
from scipy import stats
probability = stats.binom.pmf(6, 9, 0.80)
print(probability)  # ~0.176
Question:
Python Code:
from scipy.stats import chi2_contingency

def chi_test():
    '''
    Output
    1. stat: Float
    2. dof : Integer
    3. p_val: Float
    4. res: String
    '''
    # Note: Round off the Float values to 2 decimal places.
    table = [[18, 36, 21, 9, 6],
             [12, 36, 45, 36, 21],
             [6, 9, 9, 3, 3],
             [3, 9, 9, 6, 3]]
    stat, p, dof, expected = chi2_contingency(table)
    stat = round(stat, 2)
    p_val = round(p, 2)
    if p <= 0.05:
        res = 'Reject the Null Hypothesis'
    else:
        res = 'Failed to reject the Null Hypothesis'
    return stat, dof, p_val, res

if __name__ == '__main__':
    print(chi_test())
Dataset Download
curl http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz > cifar-10-python.tar.gz
import os
import numpy as np

def _load_cifar10_batch(file):
    import pickle
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='latin1')
    # Each row holds 3072 values: 1024 red, then 1024 green, then 1024 blue.
    # Reshape to (N, 3, 32, 32) and move channels last to get 32 x 32 x 3 images.
    data = batch['data'].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
    return data, batch['labels']

print('Loading...')
batch_fns = [os.path.join("./", 'cifar-10-batches-py', 'data_batch_' + str(i))
             for i in range(1, 6)]
data_batches = [_load_cifar10_batch(fn) for fn in batch_fns]
Data Stacking
The loaded batches are stacked into one big array, as sketched below.
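A minimal sketch of this step, assuming the data_batches list built above:
import numpy as np

# Concatenate the five (10000, 32, 32, 3) batches into one array of 50,000 samples.
train_data = np.concatenate([images for images, labels in data_batches])
train_labels = np.concatenate([np.array(labels) for images, labels in data_batches])
print(train_data.shape, train_labels.shape)   # (50000, 32, 32, 3) (50000,)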
Subset Generation
The dataset of 50,000 samples is split in the ratio 92:8 to take a smaller working
portion: the 8% part contains only 4,000 images. These 4,000 samples are used for
generating the train and test sets for classification, as sketched below.
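A sketch of the 92:8 subset step using scikit-learn's StratifiedShuffleSplit
(variable names assumed from the stacking step above):
from sklearn.model_selection import StratifiedShuffleSplit

# Keep 8% of the data, stratified so each class keeps its original proportion.
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.08, random_state=42)
for rest_idx, subset_idx in sss.split(train_data, train_labels):
    subset_data = train_data[subset_idx]       # (4000, 32, 32, 3)
    subset_labels = train_labels[subset_idx]   # (4000,)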
Data Splitting
The 4,000 samples are split in the ratio 7:3 (i.e., 2,800 for training and 1,200
for testing) using StratifiedShuffleSplit, as sketched below.
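A sketch of the 7:3 split under the same assumed names:
# Stratified 70/30 split of the 4,000-sample subset.
sss2 = StratifiedShuffleSplit(n_splits=1, test_size=0.3, random_state=42)
for train_idx, test_idx in sss2.split(subset_data, subset_labels):
    train_data, test_data = subset_data[train_idx], subset_data[test_idx]
    train_labels, test_labels = subset_labels[train_idx], subset_labels[test_idx]
print(train_data.shape, test_data.shape)   # (2800, 32, 32, 3) (1200, 32, 32, 3)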
Normalization
You can also try out the same normalization using the Python scikit-learn library.
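The exact normalization used is not shown here; a common choice for image data is
to scale the raw pixel values to the [0, 1] range:
# Assumes train_data / test_data from the split above, with 0-255 integer pixels.
train_data = train_data.astype('float32') / 255.0
test_data = test_data.astype('float32') / 255.0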
ZCA Whitening
The main aim of whitening is to reduce data redundancy, so that the features are
less correlated and have the same variance.
ZCA stands for zero-phase component analysis. ZCA-whitened images resemble the
original images.
# Flattening the images before computing the whitening matrix
train_data_flat = train_data.reshape(train_data.shape[0], -1).T
test_data_flat = test_data.reshape(test_data.shape[0], -1).T
print('train_data_flat: ', train_data_flat.shape)
print('test_data_flat: ', test_data_flat.shape)
train_data_flat_t = train_data_flat.T
test_data_flat_t = test_data_flat.T
The below code for SVD may not work in the available online cloud playground due to
package issues, so it is better to try it out in a local Python environment.
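The SVD code itself is not reproduced in this section; as a placeholder, a minimal
ZCA sketch (assuming train_data_flat as built above, with features in rows and
samples in columns):
# Center the data, then form the feature covariance matrix.
mean = train_data_flat.mean(axis=1, keepdims=True)
X_c = train_data_flat - mean
cov = np.dot(X_c, X_c.T) / X_c.shape[1]

# SVD of the covariance: U holds the principal directions, S their variances.
U, S, _ = np.linalg.svd(cov)

# ZCA whitening: rotate, rescale each direction to unit variance, rotate back.
epsilon = 1e-5   # small constant to avoid dividing by zero
W_zca = U.dot(np.diag(1.0 / np.sqrt(S + epsilon))).dot(U.T)
train_data_white = W_zca.dot(X_c)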
High-dimensional spaces.
***********************************************************************
Since the features are lengths measured on different scales, we need to normalize
the data.
import numpy as np

def normalize(data):
    # Scale each feature column to zero mean and unit variance (one common choice;
    # the original function body is not shown, so this is an assumed implementation).
    return (data - data.mean(axis=0)) / data.std(axis=0)

X_norm = normalize(X)
import numpy as np
#matrix a
a = np.array([[1,2],[3,4]])
#matrix b
b = np.array([[5,6,7],[8,9, 10]])
#matrix c = a.b
c = np.dot(a, b)
print(c)
output:
[[21 24 27]
 [47 54 61]]
import numpy as np
#matrix a
a = np.array([[1,2],[3,4]])
#matrix b
b = np.array([[5,6],[8,9]])
c = np.multiply(a, b)
print(c)
output:
[[ 5 12]
 [24 36]]
Broadcasting Examples
a = np.array([[10, 10, 10], [20, 20, 20], [30, 30, 30]])
b = np.array([1, 2, 3])
c = a * b   # b is broadcast across each row of a
print(c)
output:
[[10 20 30]
[20 40 60]
[30 60 90]]
b = 1
c = a + b   # the scalar b is broadcast to every element of a
print(c)
output:
[[11 11 11]
[21 21 21]
[31 31 31]]
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
def exp(x, n):
    return x ** n
print(exp(a, 2))
output:
[[ 1 4 9]
[16 25 36]
[49 64 81]]
# Note that each element of array a has been raised to the power 2
array1d = np.array([1,2,3,4])
print("shape of array1d before reshaping: ", array1d.shape)
array1d = array1d.reshape(1,4)
print("shape of array1d after reshaping: ", array1d.shape)
#rank of matrix can be found using np.linalg.matrix_rank() function
print("array1d is a martrix of rank {}".format(np.linalg.matrix_rank(array1d)))
output:
shape of array1d before reshaping: (4,)
shape of array1d after reshaping: (1, 4)
array1d is a matrix of rank 1
The shape (4,) just represents that the array has 4 elements.
The shape (1, 4) represents that the array has 4 elements arranged in one row and
four columns.
But for SNN we need to have the data of shape (no_of_features x no_of_samples).
So take a transpose of X_norm.
X_norm = X_norm.reshape(100, 4)   # 100 samples x 4 features
X_data = X_norm.T                 # transpose to (4, 100): features x samples
Y_data = Y.reshape(1, 100)
print(X_data.shape)
print(Y_data.shape)
output:
(4, 100)
(1, 100)
Before we start the forward propagation, we need to initialize the weights and
bias. Since we have four features, we need a weight vector of shape (4, 1) and one
bias term of shape (1, 1); for a single sigmoid neuron, initializing both to zero,
as below, works fine.
def initialiseNetwork(num_features):
    W = np.zeros((num_features, 1))
    b = 0
    parameters = {"W": W, "b": b}
    return parameters
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

Forward Propagation

def forwardPropagation(X, parameters):   # (wrapper name assumed; not shown in source)
    W = parameters["W"]
    b = parameters["b"]
    Z = np.dot(W.T, X) + b   # linear part: shape (1, num_samples)
    A = sigmoid(Z)           # sigmoid activation
    return A
Defining Backpropagation
Using this output, you need to find the derivatives of the weights and bias.
def backPropagation(X, Y, A):   # (wrapper name assumed; not shown in source)
    num_samples = X.shape[1]
    dZ = A - Y   # gradient w.r.t. Z for sigmoid activation with log loss
    dW = np.dot(X, dZ.T) / num_samples
    db = np.sum(dZ) / num_samples
    return dW, db
Updating Parameters
Once we have the derivatives, we subtract them, scaled by the learning rate, from
the original weights and bias, as sketched below.
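A minimal sketch of the update step (function name assumed):
def updateParameters(parameters, dW, db, learning_rate):
    # Gradient-descent step: move against the gradient.
    W = parameters["W"] - learning_rate * dW
    b = parameters["b"] - learning_rate * db
    return {"W": W, "b": b}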
Using all the functions defined so far, let's define the model to initialize and
train the SNN. Train the model on the iris dataset with a learning rate of 0.1 and
the number of iterations equal to 1000, as sketched below.
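A sketch of the training loop under the assumed function names above:
def model(X, Y, num_iter, learning_rate):
    parameters = initialiseNetwork(X.shape[0])
    for i in range(num_iter):
        A = forwardPropagation(X, parameters)
        dW, db = backPropagation(X, Y, A)
        parameters = updateParameters(parameters, dW, db, learning_rate)
        if i % 100 == 0:
            # Binary cross-entropy cost
            cost = -np.mean(Y * np.log(A) + (1 - Y) * np.log(1 - A))
            print("cost after {} iterations: {:.4f}".format(i, cost))
    return parameters

parameters = model(X_data, Y_data, num_iter=1000, learning_rate=0.1)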
output:
You can see that at every iteration the cost reduces, approaching zero.
***********************************************************************
Now, you know that the input to a CNN is an N-dimensional array. If it's a digital
image, then its dimension is (height x width x channels).
An additional dimension is added for the input which represents the number of
samples, so the final dimension would be (no_of_samples x height x width x
channels).
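For example, the 2,800-image training split from earlier would have:
print(train_data.shape)   # (2800, 32, 32, 3) = (samples, height, width, channels)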
***********************************************************************
Implementation using NumPy
import numpy as np
A = np.random.randint(0, 10, size=(3, 3, 3))   # one input patch
W = np.random.randint(0, 10, size=(3, 3, 3))   # one filter of the same size
scalar = np.sum(A * W)   # single convolution step: element-wise multiply, then sum
print(scalar)
688 (the exact value varies between runs, since the arrays are random)
***********************************************************************
Padding - NumPy
def zero_pad(data, pad):
    # Pad only the height and width axes of (samples, height, width, channels) data.
    data_pad = np.pad(array=data,
                      pad_width=((0, 0), (pad, pad), (pad, pad), (0, 0)),
                      mode='constant', constant_values=0)
    return data_pad
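A quick usage check of zero_pad (shapes chosen just for illustration):
padded = zero_pad(np.zeros((10, 32, 32, 3)), pad=2)
print(padded.shape)   # (10, 36, 36, 3): height and width grow by 2 * pad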
***********************************************************************
Strided Convolution Using NumPy
A detailed explanation of this code is provided in the next card; a minimal sketch
is given below.
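A minimal sketch of a strided convolution with a single filter, using the zero_pad
helper from above (function and variable names assumed):
def conv_forward(data, W, stride, pad):
    # data: (n_samples, h, w, channels); W: one (f, f, channels) filter.
    data_pad = zero_pad(data, pad)
    n, h, w, c = data.shape
    f = W.shape[0]
    out_h = (h + 2 * pad - f) // stride + 1
    out_w = (w + 2 * pad - f) // stride + 1
    out = np.zeros((n, out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            # Slice the receptive field and apply the single convolution step.
            patch = data_pad[:, i * stride:i * stride + f, j * stride:j * stride + f, :]
            out[:, i, j] = np.sum(patch * W, axis=(1, 2, 3))
    return out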