import pandas as pd
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
AL_csv = pd.read_csv("AL_seriesN.csv")
display(AL_csv.head())
   Alloy     Si     Fe     Cu     Mn     Mg   Cr     Zn     Ti   Zr  \
0   1050  0.125  0.200  0.025  0.025  0.025  0.0  0.025  0.015  0.0
1   1060  0.125  0.175  0.025  0.015  0.015  0.0  0.025  0.015  0.0
2   1070  0.100  0.125  0.020  0.015  0.015  0.0  0.020  0.015  0.0
3   1080  0.075  0.075  0.015  0.010  0.010  0.0  0.015  0.015  0.0
4   1085  0.050  0.060  0.015  0.010  0.010  0.0  0.015  0.010  0.0

   YS (Mpa)  TE (%)
0    100.00   16.00
1     69.12   17.00
2     72.88   10.74
3     71.88   10.18
4     72.88   10.74
AL_csv.columns

Index(['Alloy', 'Si', 'Fe', 'Cu', 'Mn', 'Mg', 'Cr', 'Zn', 'Ti', 'Zr',
       'Trace element (Ga+Co+Ag+Bi+Be+Sc+B+Ni+Sn+Pb+Li+V)', 'Al',
       'UTS (Mpa)', 'YS (Mpa)', 'TE (%)'],
      dtype='object')
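X and y are used below, but the cell that defines them did not survive the export. A minimal reconstruction, assuming the composition columns are the features and yield strength 'YS (Mpa)' is the target (the choice of target is an assumption; the dataset also carries 'UTS (Mpa)' and 'TE (%)'):

# Hypothetical reconstruction: drop the alloy identifier and the three
# measured properties, keeping only composition features; predict YS (Mpa).
feature_cols = [c for c in AL_csv.columns
                if c not in ('Alloy', 'UTS (Mpa)', 'YS (Mpa)', 'TE (%)')]
X = AL_csv[feature_cols]
y = AL_csv['YS (Mpa)']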
X.head(3)

      Si     Fe     Cu     Mn     Mg   Cr     Zn     Ti   Zr  \
0  0.125  0.200  0.025  0.025  0.025  0.0  0.025  0.015  0.0
1  0.125  0.175  0.025  0.015  0.015  0.0  0.025  0.015  0.0
2  0.100  0.125  0.020  0.015  0.015  0.0  0.020  0.015  0.0
y.head(3)
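The train/test split cell is likewise missing, although X_train and X_test are scaled immediately below. A standard sketch using the train_test_split already imported above (the 80/20 split and the seed are assumptions):

# Hold out 20% of the alloys for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)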
# Standardize features: fit the scaler on the training set only, then
# apply the same transformation to the test set to avoid data leakage.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
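param_grid is passed to GridSearchCV below, but its definition is also absent from the export. A plausible sketch (the exact values are assumptions, though the lbfgs ConvergenceWarnings later in the output show the real grid did include solver='lbfgs'):

# Hypothetical search grid over architecture, regularization, and solver.
param_grid = {
    'hidden_layer_sizes': [(50,), (100,), (50, 50)],
    'activation': ['relu', 'tanh'],
    'alpha': [1e-4, 1e-3, 1e-2],
    'solver': ['adam', 'lbfgs'],
}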
# Initialize GridSearchCV
grid_search = GridSearchCV(
    estimator=MLPRegressor(max_iter=10000, early_stopping=True,
                           random_state=42),
    param_grid=param_grid,
    cv=5,  # 5-fold cross-validation
    scoring='neg_mean_squared_error',
    verbose=2
)

# Fit the model (this is the call that gets interrupted in the traceback below)
grid_search.fit(X_train, y_train)

# Best parameters
print("Best Parameters:", grid_search.best_params_)
C:\Users\user\AppData\Roaming\Python\Python312\site-packages\sklearn\neural_network\_multilayer_perceptron.py:545: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.

Increase the number of iterations (max_iter) or scale the data as shown in:
    https://scikit-learn.org/stable/modules/preprocessing.html
  self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)

[The same ConvergenceWarning was emitted once per lbfgs fit in the grid search; the verbatim repetitions are omitted here.]
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[26], line 21
     12 grid_search = GridSearchCV(
     13     estimator=MLPRegressor(max_iter=10000, early_stopping=True, random_state=42),
     14     param_grid=param_grid,
    (...)
     17     verbose=2
     18 )
     20 # Fit the model
---> 21 grid_search.fit(X_train, y_train)
     23 # Best parameters
     24 print("Best Parameters:", grid_search.best_params_)

[Intermediate frames elided: the interrupt propagated from GridSearchCV.fit through sklearn's model_selection\_search.py and joblib's sequential Parallel loop into MLPRegressor._fit_lbfgs, and finally into scipy.optimize's L-BFGS-B routine, which was still evaluating the loss and backpropagated gradients when the kernel was interrupted. The grid search was therefore killed mid-fit.]

KeyboardInterrupt:
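The flood of lbfgs ConvergenceWarnings and the manual interrupt suggest the grid was too expensive to finish. One way to restart it more cheaply, sketched under the assumption that the adam solver and a smaller grid are acceptable (the grid values are illustrative, and n_jobs=-1 is standard GridSearchCV parallelism):

# A cheaper restart: drop lbfgs (which repeatedly failed to converge above),
# shrink the grid, and fit the cross-validation folds in parallel.
param_grid_small = {
    'hidden_layer_sizes': [(50,), (100,)],
    'alpha': [1e-4, 1e-2],
}
grid_search = GridSearchCV(
    estimator=MLPRegressor(solver='adam', max_iter=10000,
                           early_stopping=True, random_state=42),
    param_grid=param_grid_small,
    cv=5,
    scoring='neg_mean_squared_error',
    n_jobs=-1,  # use all available cores
    verbose=2,
)
grid_search.fit(X_train, y_train)
print("Best Parameters:", grid_search.best_params_)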