0% found this document useful (0 votes)
9 views

TP - JEUX - ML - Corrigé

Uploaded by

Youssef Jamma3
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
9 views

TP - JEUX - ML - Corrigé

Uploaded by

Youssef Jamma3
Copyright
© © All Rights Reserved
We take content rights seriously. If you suspect this is your content, claim it here.
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 5

TPs-5 et 6, Algorithmes des jeux et Machine learning (Correction)

I. Algorithmes des jeux


Exercice 1 : (voir le fichier corrigé_ex_1_algo_jeux.pdf sur la plateforme)

II. Apprentissage automatique

Exercice 1 : Régression linéaire


Correction:
# Libraries
import pandas as pd

# Mount Google Drive to access the dataset
from google.colab import drive
drive.mount("/content/drive", force_remount=True)
data = pd.read_csv('/content/drive/My Drive/market.csv')

# Preview the first rows
data.head()

# Statistical summary of the dataset
data.describe()
df = data

# Target selection: the closing price is what we want to predict
y = df['Close']

# Feature selection: drop the target and the non-predictive columns
x = df.drop(columns=['Date','Close','Total Trade Quantity','Turnover (Lacs)'])

# Split the dataset into training and testing data (15% held out for testing)
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.15)
print(train_x.shape)
print(test_x.shape)
print(train_y.shape)
print(test_y.shape)

# Model choice: ordinary least-squares linear regression
from sklearn.linear_model import LinearRegression
regression = LinearRegression()

# Training
regression.fit(train_x, train_y)

# Testing: predict on the held-out set.
# BUG FIX: this must run BEFORE the MSE computation below — the original
# referenced `predicted` before it was defined (NameError at runtime).
predicted = regression.predict(test_x)

# Evaluation metric
from sklearn.metrics import mean_squared_error
print("regression coefficient",regression.coef_)
mse = mean_squared_error(test_y, predicted)
print("mean squared error: ", mse)

# Display a few test results side by side (actual vs predicted closing price)
dfr = pd.DataFrame({'actual_close':test_y, 'Predicted_close':predicted})
dfr.head(10)

Exercice 2 : Classification avec Random forest


Correction :
# Libraries
import pandas as pd

# Mount Google Drive so the dataset can be read
from google.colab import drive
drive.mount("/content/drive", force_remount=True)
data = pd.read_csv('/content/drive/My Drive/heart.csv')

# Show the first rows of the dataset
data.head()

# Statistical description of the dataset
data.describe()
df = data

# Target selection: the diagnosis column
y = df['target']

# Feature selection: every column except the target
x = df.drop(columns=['target'])

# Split the dataset into training and testing sets (15% held out)
from sklearn.model_selection import train_test_split
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.15)
for part in (train_x, test_x, train_y, test_y):
    print(part.shape)

# Model choice: random forest classifier
from sklearn.ensemble import RandomForestClassifier
classifier_rf = RandomForestClassifier()

# Training
classifier_rf.fit(train_x, train_y)

# Prediction on the held-out set
predictions = classifier_rf.predict(test_x)

# Show a few test results side by side (actual state vs predicted disease)
dfr = pd.DataFrame({'actual_etat': test_y, 'Predicted_disease': predictions})
dfr.head(10)

# Metric evaluation
from sklearn.metrics import accuracy_score, confusion_matrix
print(accuracy_score(test_y, predictions))
confusion_matrix(test_y, predictions)
# Confusion-matrix layout (rows = actual, columns = predicted):
#        1    0
#   1   TP   FP
#   0   FN   TN
#
# accuracy    = (TP + TN) / (TP + TN + FP + FN)
# sensitivity = TP / (TP + FN)
# specificity = TN / (TN + FP)
# PPV         = TP / (TP + FP)
# NPV         = TN / (TN + FN)

Exercice 3 : Classification des images


Correction :
from google.colab import drive
drive.mount("/content/drive", force_remount=True)

!nvcc --version
!unzip "/content/drive/MyDrive/Nouveaudossier.zip"

#Extraction de caracteristiques
import glob
import cv2
import numpy as np
from scipy.stats import skew, entropy, kurtosis
import os
import math
vector = [[] for i in range(500)]
for img in glob.glob("/content/Nouveaudossier/data/*.jpg"):
features = []
base=os.path.basename(img)
index=os.path.splitext(base)[0]
n= cv2.imread(img)
gray = cv2.cvtColor(n, cv2.COLOR_BGR2GRAY)
gray = np.reshape(gray,(576*576))
mean = np.mean(gray)
features.append(mean)
std = np.std(gray)
features.append(std)
sk = skew(gray)
features.append(sk)
entr = entropy(gray)
features.append(entr)
kurtosi = kurtosis(gray)
features.append(kurtosi)
features = [0 if math.isnan(x) else x for x in features]
vector[int(index)] = features

#Recuperation de la cible
import pandas as pd
labels = pd.read_csv('/content/Nouveaudossier/labels.csv')

labels = labels['label']

df = pd.DataFrame(vector, columns = ['mean', 'std','sk','entr','kurtosi'])


print(df)

from sklearn.model_selection import train_test_split


train_x, test_x, train_y, test_y = train_test_split(df, labels, test_size=0.15)

#choix de modele
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
#random forest
classifier_rf = RandomForestClassifier()
#SVM
model_svm = SVC()

#entrainement
classifier_rf.fit(train_x, train_y)
model_svm.fit(train_x, train_y)

#evaluation
predictions = classifier_rf.predict(test_x)

pred_ = model_svm.predict(test_x)

#evaluation metrique
#Random forest
from sklearn.metrics import accuracy_score,confusion_matrix
print(accuracy_score(test_y, predictions))
confusion_matrix(test_y, predictions)

#SVM
print(accuracy_score(test_y, pred_))
confusion_matrix(test_y, pred_)

You might also like