_MLP_reg_improved

The document outlines a machine learning process using a Multi-layer Perceptron (MLP) regressor to predict mechanical properties of aluminum alloys based on their chemical composition. It includes data preprocessing steps, model training, evaluation metrics, and visualization of actual vs. predicted values. Additionally, it explores feature engineering and hyperparameter tuning to improve model performance.


import numpy as np
import pandas as pd
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split

AL_csv = pd.read_csv("AL_seriesN.csv")

display(AL_csv.head())

   Alloy     Si     Fe     Cu     Mn     Mg   Cr     Zn     Ti   Zr  \
0   1050  0.125  0.200  0.025  0.025  0.025  0.0  0.025  0.015  0.0
1   1060  0.125  0.175  0.025  0.015  0.015  0.0  0.025  0.015  0.0
2   1070  0.100  0.125  0.020  0.015  0.015  0.0  0.020  0.015  0.0
3   1080  0.075  0.075  0.015  0.010  0.010  0.0  0.015  0.015  0.0
4   1085  0.050  0.060  0.015  0.010  0.010  0.0  0.015  0.010  0.0

   Trace element (Ga+Co+Ag+Bi+Be+Sc+B+Ni+Sn+Pb+Li+V)      Al  UTS (Mpa)  \
0                                              0.025  99.535     119.25
1                                              0.025  99.580      93.62
2                                              0.025  99.665     103.63
3                                              0.025  99.745      99.50
4                                              0.025  99.790     103.63

   YS (Mpa)  TE (%)
0    100.00   16.00
1     69.12   17.00
2     72.88   10.74
3     71.88   10.18
4     72.88   10.74

AL_csv.columns

Index(['Alloy', 'Si', 'Fe', 'Cu', 'Mn', 'Mg', 'Cr', 'Zn', 'Ti', 'Zr',
       'Trace element (Ga+Co+Ag+Bi+Be+Sc+B+Ni+Sn+Pb+Li+V)', 'Al',
       'UTS (Mpa)', 'YS (Mpa)', 'TE (%)'],
      dtype='object')

X = AL_csv.drop(['UTS (Mpa)', 'YS (Mpa)', 'TE (%)','Alloy'], axis=1)


y = AL_csv[["UTS (Mpa)", "YS (Mpa)", "TE (%)"]]

X.head(3)
      Si     Fe     Cu     Mn     Mg   Cr     Zn     Ti   Zr  \
0  0.125  0.200  0.025  0.025  0.025  0.0  0.025  0.015  0.0
1  0.125  0.175  0.025  0.015  0.015  0.0  0.025  0.015  0.0
2  0.100  0.125  0.020  0.015  0.015  0.0  0.020  0.015  0.0

   Trace element (Ga+Co+Ag+Bi+Be+Sc+B+Ni+Sn+Pb+Li+V)      Al
0                                              0.025  99.535
1                                              0.025  99.580
2                                              0.025  99.665

y.head(3)

   UTS (Mpa)  YS (Mpa)  TE (%)
0     119.25    100.00   16.00
1      93.62     69.12   17.00
2     103.63     72.88   10.74

# Replace infinity with NaN
X.replace([np.inf, -np.inf], np.nan, inplace=True)

# Fill NaN values with the column mean
X = X.fillna(X.mean())
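
One caveat: X.mean() here is computed over all rows, so the eventual test rows leak into the imputation statistics. A leakage-free variant (a sketch; SimpleImputer is standard scikit-learn, but using it this way is a substitution, not part of the original notebook) splits first and fits the imputer on the training rows only:

from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split

# Split first, then learn the column means from the training rows only
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)
imputer = SimpleImputer(strategy='mean')
X_train = imputer.fit_transform(X_train)   # fit on training data
X_test = imputer.transform(X_test)         # reuse the training means on test data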

# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)

# Normalize the data
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Train the model
mlp = MLPRegressor(
    hidden_layer_sizes=(200, 100, 50),  # Increased complexity
    max_iter=10000,                     # More iterations
    learning_rate_init=0.001,           # Adjusted learning rate
    early_stopping=True,                # Early stopping
    validation_fraction=0.1,            # Validation set for early stopping
    n_iter_no_change=50,                # Stop if no improvement for 50 iterations
    random_state=42
)
mlp.fit(X_train, y_train)

MLPRegressor(early_stopping=True, hidden_layer_sizes=(200, 100, 50),
             max_iter=10000, n_iter_no_change=50, random_state=42)

# Evaluate the model
from sklearn.metrics import mean_squared_error, r2_score
y_pred = mlp.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print(f'Mean Squared Error: {mse}')
r2 = r2_score(y_test, y_pred)
print(f'R^2 Score: {r2}')
train_score = mlp.score(X_train, y_train)
test_score = mlp.score(X_test, y_test)
print(f"Training R^2: {train_score:.4f}")
print(f"Testing R^2: {test_score:.4f}")

Mean Squared Error: 5867.186640475095
R^2 Score: 0.14901906081596586
Training R^2: 0.2170
Testing R^2: 0.1490
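
The MSE and R^2 above are averaged over the three targets, which live on different scales (UTS and YS in MPa, TE in %). A quick sketch of per-target scores, using scikit-learn's multioutput='raw_values' option, shows where the error concentrates:

from sklearn.metrics import mean_squared_error, r2_score

# Per-target metrics instead of the default average over outputs
mse_per_target = mean_squared_error(y_test, y_pred, multioutput='raw_values')
r2_per_target = r2_score(y_test, y_pred, multioutput='raw_values')
for name, m, r in zip(y.columns, mse_per_target, r2_per_target):
    print(f"{name}: MSE = {m:.2f}, R^2 = {r:.4f}")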

import matplotlib.pyplot as plt


# Plot actual vs predicted
plt.scatter(y_test['UTS (Mpa)'], y_pred[:, 0], label='UTS (Mpa)')
plt.scatter(y_test['YS (Mpa)'], y_pred[:, 1], label='YS (Mpa)')
plt.scatter(y_test['TE (%)'], y_pred[:, 2], label='TE (%)')
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('Actual vs Predicted')
plt.legend()
plt.show()
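
The plot above overlays three targets with very different scales on one pair of axes. A per-target parity plot with a y = x reference line (a sketch, not from the original notebook) makes each property easier to judge separately:

# One parity subplot per target, each with a y = x reference line
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
for ax, col, j in zip(axes, y.columns, range(3)):
    ax.scatter(y_test[col], y_pred[:, j], alpha=0.7)
    lims = [min(y_test[col].min(), y_pred[:, j].min()),
            max(y_test[col].max(), y_pred[:, j].max())]
    ax.plot(lims, lims, 'k--', linewidth=1)   # perfect-prediction line
    ax.set_xlabel(f'Actual {col}')
    ax.set_ylabel(f'Predicted {col}')
    ax.set_title(col)
plt.tight_layout()
plt.show()
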
With new features
X = AL_csv.drop(['UTS (Mpa)', 'YS (Mpa)', 'TE (%)', 'Alloy'], axis=1)
y = AL_csv[["UTS (Mpa)", "YS (Mpa)", "TE (%)"]]

X['Weighted_Sum'] = (0.265353 * X['Cu'] + 0.159489 * X['Zn']
                     + 0.136757 * X['Al'] + 0.094458 * X['Mg'])
X['Cu_Zn_Interaction'] = X['Cu'] * X['Zn']
X['Log_Cu'] = np.log(X['Cu'] + 1)
X['Cu_to_Zn_Ratio'] = X['Cu'] / X['Zn']  # division by zero yields inf; cleaned up below
X['Trace_Elements_Sum'] = X['Trace element (Ga+Co+Ag+Bi+Be+Sc+B+Ni+Sn+Pb+Li+V)']

# Replace infinity with NaN
X.replace([np.inf, -np.inf], np.nan, inplace=True)

# Fill NaN values with the column mean
X = X.fillna(X.mean())

# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)

# Normalize the data
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Train the model
mlp = MLPRegressor(
    hidden_layer_sizes=(200, 100, 50),  # Increased complexity
    max_iter=10000,                     # More iterations
    learning_rate_init=0.001,           # Adjusted learning rate
    early_stopping=True,                # Early stopping
    validation_fraction=0.1,            # Validation set for early stopping
    n_iter_no_change=50,                # Stop if no improvement for 50 iterations
    random_state=42
)
mlp.fit(X_train, y_train)

MLPRegressor(early_stopping=True, hidden_layer_sizes=(200, 100, 50),
             max_iter=10000, n_iter_no_change=50, random_state=42)

# Evaluate the model
from sklearn.metrics import mean_squared_error, r2_score
y_pred = mlp.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print(f'Mean Squared Error: {mse}')
r2 = r2_score(y_test, y_pred)
print(f'R^2 Score: {r2}')
train_score = mlp.score(X_train, y_train)
test_score = mlp.score(X_test, y_test)
print(f"Training R^2: {train_score:.4f}")
print(f"Testing R^2: {test_score:.4f}")

Mean Squared Error: 5331.229923952499
R^2 Score: 0.17351464768811006
Training R^2: 0.3370
Testing R^2: 0.1735

import matplotlib.pyplot as plt


# Plot actual vs predicted
plt.scatter(y_test['UTS (Mpa)'], y_pred[:, 0], label='UTS (Mpa)')
plt.scatter(y_test['YS (Mpa)'], y_pred[:, 1], label='YS (Mpa)')
plt.scatter(y_test['TE (%)'], y_pred[:, 2], label='TE (%)')
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('Actual vs Predicted')
plt.legend()
plt.show()

With hyperparameter tuning


X = AL_csv.drop(['UTS (Mpa)', 'YS (Mpa)', 'TE (%)','Alloy'], axis=1)
y = AL_csv[["UTS (Mpa)", "YS (Mpa)", "TE (%)"]]

# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)

# Normalize the data
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Train the model
mlp = MLPRegressor(
    hidden_layer_sizes=(50, 25),   # Two hidden layers
    activation='relu',             # ReLU activation
    solver='adam',                 # Use Adam solver
    alpha=0.001,                   # Regularization strength
    learning_rate_init=0.001,      # Initial learning rate
    max_iter=1000,                 # Increase max iterations
    early_stopping=True,           # Enable early stopping
    validation_fraction=0.2,       # 20% validation data
    n_iter_no_change=10,           # Stop if no improvement for 10 epochs
    random_state=42,               # Reproducibility
    verbose=True                   # Monitor training progress
)
mlp.fit(X_train, y_train)

Iteration 1, loss = 31685.53270493
Validation score: -4.271692
Iteration 2, loss = 31675.63089445
Validation score: -4.271575
Iteration 3, loss = 31665.66221233
Validation score: -4.271310
[... iterations 4-327 omitted: the loss falls steadily from ~31656 to ~2928 while the validation score climbs from -4.271 to a best of about -0.3912 around iteration 320, then begins to worsen ...]
Iteration 328, loss = 2914.47752686
Validation score: -0.392477
Iteration 329, loss = 2901.61311379
Validation score: -0.392833
Iteration 330, loss = 2889.06583080
Validation score: -0.393234
Validation score did not improve more than tol=0.000100 for 10 consecutive epochs. Stopping.

MLPRegressor(alpha=0.001, early_stopping=True, hidden_layer_sizes=(50, 25),
             max_iter=1000, random_state=42, validation_fraction=0.2,
             verbose=True)

# Evaluate the model
from sklearn.metrics import mean_squared_error
y_pred = mlp.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
print(f'Mean Squared Error: {mse}')
train_score = mlp.score(X_train, y_train)
test_score = mlp.score(X_test, y_test)
print(f"Training R^2: {train_score:.4f}")
print(f"Testing R^2: {test_score:.4f}")

Mean Squared Error: 8971.311435577689
Training R^2: 0.1129
Testing R^2: -0.1902

from sklearn.model_selection import GridSearchCV

# Define the parameter grid
param_grid = {
    'hidden_layer_sizes': [(50,), (50, 25), (100, 50)],
    'alpha': [0.0001, 0.001, 0.01],
    'learning_rate_init': [0.001, 0.01, 0.1],
    'solver': ['adam', 'lbfgs']
}

# Initialize GridSearchCV
grid_search = GridSearchCV(
    estimator=MLPRegressor(max_iter=10000, early_stopping=True,
                           random_state=42),
    param_grid=param_grid,
    cv=5,  # 5-fold cross-validation
    scoring='neg_mean_squared_error',
    verbose=2
)

# Fit the model
grid_search.fit(X_train, y_train)

# Best parameters
print("Best Parameters:", grid_search.best_params_)

Fitting 5 folds for each of 54 candidates, totalling 270 fits

[CV] END alpha=0.0001, hidden_layer_sizes=(50,), learning_rate_init=0.001, solver=adam; total time= 1.5s
[CV] END alpha=0.0001, hidden_layer_sizes=(50,), learning_rate_init=0.001, solver=adam; total time= 1.3s
[CV] END alpha=0.0001, hidden_layer_sizes=(50,), learning_rate_init=0.001, solver=adam; total time= 1.9s
[CV] END alpha=0.0001, hidden_layer_sizes=(50,), learning_rate_init=0.001, solver=adam; total time= 2.1s
[CV] END alpha=0.0001, hidden_layer_sizes=(50,), learning_rate_init=0.001, solver=adam; total time= 1.8s

C:\Users\user\AppData\Roaming\Python\Python312\site-packages\sklearn\neural_network\_multilayer_perceptron.py:545: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.

Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
  self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)

[CV] END alpha=0.0001, hidden_layer_sizes=(50,), learning_rate_init=0.001, solver=lbfgs; total time= 3.8s
[... output truncated: the ConvergenceWarning above repeats for every lbfgs fit, and the [CV] END lines continue through the remaining candidates (adam fits finish in under ~2s, lbfgs fits take roughly 4-13s) until the run is interrupted during the (100, 50) lbfgs fits ...]

---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[26], line 21
     12 grid_search = GridSearchCV(
     13     estimator=MLPRegressor(max_iter=10000, early_stopping=True, random_state=42),
     14     param_grid=param_grid,
   (...)
     17     verbose=2
     18 )
     20 # Fit the model
---> 21 grid_search.fit(X_train, y_train)
     23 # Best parameters
     24 print("Best Parameters:", grid_search.best_params_)

[... traceback truncated: the interrupt propagates from GridSearchCV.fit through joblib and the MLP's lbfgs backpropagation into scipy.optimize ...]

KeyboardInterrupt:
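
The search was interrupted before finishing, so best_params_ was never printed. A leaner variant (a sketch, assuming the same train/test split is in memory; the pipeline step names and n_jobs=-1 are choices made here, not from the original notebook) wraps the scaler and regressor in a Pipeline, so each CV fold is scaled on its own training portion rather than on the pre-scaled full training set, and runs the fits in parallel:

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor

# Scaling inside the pipeline keeps each fold's validation portion unseen by the scaler
pipe = Pipeline([
    ('scaler', StandardScaler()),
    ('mlp', MLPRegressor(max_iter=10000, early_stopping=True, random_state=42)),
])

# Same grid as above, with parameters addressed through the 'mlp__' prefix
param_grid = {
    'mlp__hidden_layer_sizes': [(50,), (50, 25), (100, 50)],
    'mlp__alpha': [0.0001, 0.001, 0.01],
    'mlp__learning_rate_init': [0.001, 0.01, 0.1],
    'mlp__solver': ['adam', 'lbfgs'],
}

grid_search = GridSearchCV(pipe, param_grid, cv=5,
                           scoring='neg_mean_squared_error',
                           n_jobs=-1,   # run folds in parallel so the search can finish
                           verbose=1)
grid_search.fit(X_train, y_train)   # pass the unscaled training split here
print("Best Parameters:", grid_search.best_params_)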
