GridSearchCV - Error: The truth value of an array with more than one element is ambiguous. Use a.any() or...
I am trying to do a neural network classification using scikit-learn in Python.
I generated my data, split it into train and test sets, and fit it with MLPClassifier().
What I plan to do next is to tune the parameters of this model using sklearn.model_selection.GridSearchCV.
Here is my code:
import matplotlib.pyplot as plt
import numpy as np
import itertools
from sklearn.neural_network import MLPClassifier
from sklearn.datasets.samples_generator import make_blobs, make_moons
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
X, y = make_blobs(n_samples=500, centers=5, n_features=2, random_state=10, cluster_std=2.5)
y[y==0] = -1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=10)
X_train and X_test are arrays with 2 features.
model_MLP_RAW = MLPClassifier()
model_MLP_RAW.fit(X_train, y_train)
model_MLP_RAW.predict(X_test) == y_test
model_MLP_RAW.score(X_test, y_test)
model_MLP_RAW = MLPClassifier()
param_gridMLPC = {
    'learning_rate': ["constant", "invscaling", "adaptive"],
    'hidden_layer_sizes': [x for x in itertools.product((10,20,30,40,50,100), repeat=3)],
    'alpha': [10.0 ** -np.arange(1, 7)],
    'activation': ["logistic", "relu", "tanh"]
}
CV_unknwnMLPC = GridSearchCV(estimator=model_MLP_RAW, param_grid=param_gridMLPC, cv=5)
CV_unknwnMLPC.fit(X_train, y_train)
print(CV_unknwnMLPC.best_params_)
Everything works fine until the line CV_unknwnMLPC.fit(X_train, y_train), where I get the following error:
ValueError Traceback (most recent call last)
<ipython-input-30-90faf7e56738> in <module>()
10
11 CV_unknwnMLPC = GridSearchCV(estimator=model_MLP_RAW, param_grid=param_gridMLPC, cv= 5)
---> 12 CV_unknwnMLPC.fit(X_train, y_train)
13
14 print(CV_unknwnMLPC.best_params_)
~\Anaconda3\lib\site-packages\sklearn\model_selection\_search.py in fit(self, X, y, groups, **fit_params)
638 error_score=self.error_score)
639 for parameters, (train, test) in product(candidate_params,
--> 640 cv.split(X, y, groups)))
641
642 # if one choose to see train score, "out" will contain train score info
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self, iterable)
777 # was dispatched. In particular this covers the edge
778 # case of Parallel used with an exhausted iterator.
--> 779 while self.dispatch_one_batch(iterator):
780 self._iterating = True
781 else:
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in dispatch_one_batch(self, iterator)
623 return False
624 else:
--> 625 self._dispatch(tasks)
626 return True
627
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in _dispatch(self, batch)
586 dispatch_timestamp = time.time()
587 cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
--> 588 job = self._backend.apply_async(batch, callback=cb)
589 self._jobs.append(job)
590
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\_parallel_backends.py in apply_async(self, func, callback)
109 def apply_async(self, func, callback=None):
110 """Schedule a func to be run"""
--> 111 result = ImmediateResult(func)
112 if callback:
113 callback(result)
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\_parallel_backends.py in __init__(self, batch)
330 # Don't delay the application, to avoid keeping the input
331 # arguments in memory
--> 332 self.results = batch()
333
334 def get(self):
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self)
129
130 def __call__(self):
--> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items]
132
133 def __len__(self):
~\Anaconda3\lib\site-packages\sklearn\externals\joblib\parallel.py in <listcomp>(.0)
129
130 def __call__(self):
--> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items]
132
133 def __len__(self):
~\Anaconda3\lib\site-packages\sklearn\model_selection\_validation.py in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, error_score)
456 estimator.fit(X_train, **fit_params)
457 else:
--> 458 estimator.fit(X_train, y_train, **fit_params)
459
460 except Exception as e:
~\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py in fit(self, X, y)
971 """
972 return self._fit(X, y, incremental=(self.warm_start and
--> 973 hasattr(self, "classes_")))
974
975 @property
~\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py in _fit(self, X, y, incremental)
324
325 # Validate input parameters.
--> 326 self._validate_hyperparameters()
327 if np.any(np.array(hidden_layer_sizes) <= 0):
328 raise ValueError("hidden_layer_sizes must be > 0, got %s." %
~\Anaconda3\lib\site-packages\sklearn\neural_network\multilayer_perceptron.py in _validate_hyperparameters(self)
390 if self.max_iter <= 0:
391 raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
--> 392 if self.alpha < 0.0:
393 raise ValueError("alpha must be >= 0, got %s." % self.alpha)
394 if (self.learning_rate in ["constant", "invscaling", "adaptive"] and
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
I checked some answers online and double-checked the parameters in param_gridMLPC to make sure they are provided correctly, but the error persists.
What am I doing wrong?
Thanks in advance
python scikit-learn grid-search gridsearchcv
asked Jan 3 at 19:31 by HelpASisterOut
1 Answer
'alpha': [10.0 ** -np.arange(1, 7)]
In the documentation of MLPClassifier:
alpha : float, optional, default 0.0001
L2 penalty (regularization term) parameter.
"alpha"
should be float. So in the parameter grid, it can be a list of different floats.
But when you do this:
'alpha': [10.0 ** -np.arange(1, 7)]
it becomes a list containing a single numpy array, i.e. a sequence of sequences (like a list of lists or a 2-D array). GridSearchCV then passes that whole array, not a float, to the internal MLPClassifier as "alpha". That is the error.
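To see why, here is a small illustrative check (not part of the original answer) showing that the grid value is a one-element list whose only entry is an array:

import numpy as np

alphas = [10.0 ** -np.arange(1, 7)]  # what the original grid defines
print(len(alphas))     # 1 -> GridSearchCV sees a single candidate value
print(alphas[0])       # the six alpha values packed into one numpy array
# MLPClassifier receives this whole array as `alpha`, so the check
# `self.alpha < 0.0` is evaluated on an array and raises the
# "truth value of an array ... is ambiguous" ValueError seen above.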
You can do the following instead:
'alpha': 10.0 ** -np.arange(1, 7)
That is a plain 1-D array of floats, and GridSearchCV will pick its elements one at a time to pass into the model.
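For completeness, here is a minimal corrected sketch based on the code in the question (the hidden_layer_sizes search is deliberately shrunk here so the example runs quickly; the question searches all 216 triples of (10, 20, 30, 40, 50, 100)):

import itertools
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neural_network import MLPClassifier

X, y = make_blobs(n_samples=500, centers=5, n_features=2,
                  random_state=10, cluster_std=2.5)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=10)

param_gridMLPC = {
    'learning_rate': ["constant", "invscaling", "adaptive"],
    # small illustrative subset of architectures
    'hidden_layer_sizes': [x for x in itertools.product((10, 50, 100), repeat=2)],
    'alpha': 10.0 ** -np.arange(1, 7),  # array of floats, not a list holding one array
    'activation': ["logistic", "relu", "tanh"],
}

CV_unknwnMLPC = GridSearchCV(estimator=MLPClassifier(),
                             param_grid=param_gridMLPC, cv=5)
CV_unknwnMLPC.fit(X_train, y_train)
print(CV_unknwnMLPC.best_params_)
print(CV_unknwnMLPC.score(X_test, y_test))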
answered Jan 4 at 7:34 by Vivek Kumar