This repository has been archived by the owner on Jul 24, 2024. It is now read-only.
ElasticNet.fit raises ValueError when not converging instead of just issuing a warning #74
Changing the seed in the reproduction script below to 1 gives me a different error:

ValueError Traceback (most recent call last)
ValueError: unexpected array size: new_size=1, got array with arr_size=0
The above exception was the direct cause of the following exception:
ValueError Traceback (most recent call last)
<ipython-input-4-49f76e8921b5> in <module>
10
11
---> 12 clf2 = glmnet.ElasticNet(alpha=1, lambda_path=[
13 alpha_max, alpha_max/100], standardize=False, fit_intercept=False, tol=1e-10, max_iter=1).fit(X, y)
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/glmnet/linear.py in fit(self, X, y, sample_weight, relative_penalties, groups)
236 self._cv = GroupKFold(n_splits=self.n_splits)
237
--> 238 cv_scores = _score_lambda_path(self, X, y, groups,
239 sample_weight,
240 relative_penalties,
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/glmnet/util.py in _score_lambda_path(est, X, y, groups, sample_weight, relative_penalties, scoring, n_jobs, verbose)
64 warnings.simplefilter(action, UndefinedMetricWarning)
65
---> 66 scores = Parallel(n_jobs=n_jobs, verbose=verbose, backend='threading')(
67 delayed(_fit_and_score)(est, scorer, X, y, sample_weight, relative_penalties,
68 est.lambda_path_, train_idx, test_idx)
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/joblib/parallel.py in __call__(self, iterable)
1041 # remaining jobs.
1042 self._iterating = False
-> 1043 if self.dispatch_one_batch(iterator):
1044 self._iterating = self._original_iterator is not None
1045
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/joblib/parallel.py in dispatch_one_batch(self, iterator)
859 return False
860 else:
--> 861 self._dispatch(tasks)
862 return True
863
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/joblib/parallel.py in _dispatch(self, batch)
777 with self._lock:
778 job_idx = len(self._jobs)
--> 779 job = self._backend.apply_async(batch, callback=cb)
780 # A job can complete so quickly than its callback is
781 # called before we get here, causing self._jobs to
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/joblib/_parallel_backends.py in apply_async(self, func, callback)
206 def apply_async(self, func, callback=None):
207 """Schedule a func to be run"""
--> 208 result = ImmediateResult(func)
209 if callback:
210 callback(result)
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/joblib/_parallel_backends.py in __init__(self, batch)
570 # Don't delay the application, to avoid keeping the input
571 # arguments in memory
--> 572 self.results = batch()
573
574 def get(self):
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/joblib/parallel.py in __call__(self)
260 # change the default number of processes to -1
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 262 return [func(*args, **kwargs)
263 for func, args, kwargs in self.items]
264
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/joblib/parallel.py in <listcomp>(.0)
260 # change the default number of processes to -1
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 262 return [func(*args, **kwargs)
263 for func, args, kwargs in self.items]
264
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/glmnet/util.py in _fit_and_score(est, scorer, X, y, sample_weight, relative_penalties, score_lambda_path, train_inx, test_inx)
114 """
115 m = clone(est)
--> 116 m = m._fit(X[train_inx, :], y[train_inx], sample_weight[train_inx], relative_penalties)
117
118 lamb = np.clip(score_lambda_path, m.lambda_path_[-1], m.lambda_path_[0])
~/anaconda3/envs/benchopt_lasso/lib/python3.8/site-packages/glmnet/linear.py in _fit(self, X, y, sample_weight, relative_penalties)
372 ca = ca[:, :self.n_lambda_]
373 nin = nin[:self.n_lambda_]
--> 374 self.coef_path_ = solns(_x.shape[1], ca, ia, nin)
375
376 return self
ValueError: failed in converting 4th argument `nin' of _glmnet.solns to C/Fortran array
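A possible interim workaround at the call site, until the library warns instead of raising, might be to catch the ValueError and retry with a larger iteration budget. This is only a sketch: `fit_with_retry` is a made-up helper, and it assumes the estimator follows the sklearn `set_params` protocol (which the `clone(est)` call in the traceback suggests python-glmnet estimators do).

```python
import warnings

def fit_with_retry(estimator, X, y, retry_max_iter=100_000):
    """Made-up helper: fit once; if glmnet raises, warn and retry with more iterations."""
    try:
        return estimator.fit(X, y)
    except ValueError as err:
        warnings.warn(f"glmnet raised instead of warning ({err}); "
                      f"retrying with max_iter={retry_max_iter}")
        # Assumes sklearn-style set_params, as implied by clone() in glmnet/util.py.
        estimator.set_params(max_iter=retry_max_iter)
        return estimator.fit(X, y)
```

Calling `fit_with_retry(clf2, X, y)` on the estimator from the reproduction script further down would then degrade the hard failure into a warning plus a refit, which is roughly the behaviour this issue asks the library to provide itself.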
Thank you for your package, and for making it available on conda.
If I set a max_iter that is too low, instead of getting a convergence warning as in sklearn, it simply fails with an error. Can this be fixed easily? I'm trying to get the solution for a single lambda (and from what I understood, if I use the default path, I have no guarantee that glmnet will run it to the end; it may stop early, which I don't want).
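For comparison, here is the sklearn behaviour referred to above: with a too-small max_iter, coordinate descent emits a ConvergenceWarning but still returns a fitted model instead of raising. A minimal sketch with made-up data:

```python
import warnings
import numpy as np
from sklearn.linear_model import Lasso

# Made-up data, just to trigger the warning.
rng = np.random.RandomState(0)
X = rng.randn(30, 40)
y = rng.randn(30)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    model = Lasso(alpha=0.1, max_iter=1).fit(X, y)  # far too few iterations

# The fit still succeeds; non-convergence is only reported as a warning.
print([w.category.__name__ for w in caught])  # expected to include 'ConvergenceWarning'
print(model.coef_[:5])
```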
Reproduce with:
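The original script is not preserved in this copy of the issue; the sketch below is reconstructed from the call visible in the traceback in the comment above. The data generation and the definition of alpha_max are assumptions; only the ElasticNet call itself comes from the report.

```python
import numpy as np
import glmnet

# Assumed data setup (the comment above notes that changing the seed to 1
# produces the second traceback shown there).
rng = np.random.RandomState(0)
X = rng.randn(30, 40)
y = rng.randn(30)
alpha_max = np.max(np.abs(X.T @ y)) / X.shape[0]  # assumed definition

# Call copied from the traceback: a two-point lambda path and max_iter=1.
# With so few iterations this is expected to hit the ValueError described
# above, though the exact behaviour may depend on the data.
clf2 = glmnet.ElasticNet(alpha=1, lambda_path=[alpha_max, alpha_max / 100],
                         standardize=False, fit_intercept=False,
                         tol=1e-10, max_iter=1).fit(X, y)
```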
output:
ping @agramfort