forked from epnev/SPGL1_python_port
-
Notifications
You must be signed in to change notification settings - Fork 0
/
spgl1.py
executable file
·810 lines (717 loc) · 31.3 KB
/
spgl1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
# def fakeFourier(idx,n,x,mode):
# # %PARTIALFOURIER Partial Fourier operator
# # %
# # % Y = PARTIALFOURIER(IDX,N,X,MODE)
# if mode==1:
# z = np.fft.fft(x) / np.sqrt(n)
# return z[idx].flatten()
# else:
# z = np.zeros(n,dtype=complex)
# z[idx] = x
# return np.fft.ifft(z) * np.sqrt(n)
# m=50
# n=128
# k=14
# A,Rtmp = qr(np.random.randn(n,m))
# A=A.T
# p = permutation(n)
# p=p[0:k]
# x0=zeros(n)
# x0[p]=random.randn(k)
# b=dot(A,x0)
from lsqr import *
from spgl_aux import *
import numpy as np
def Aprodprelambda(A, x, mode):
    """Apply the operator A (mode 1) or its adjoint A^H (mode 2) to x.

    Parameters
    ----------
    A : ndarray or callable
        Explicit m-by-n matrix, or a callable ``A(x, mode)`` that itself
        implements the two products.
    x : ndarray
        Vector (or stack of vectors) to multiply.
    mode : int
        1 computes ``A @ x``; any other value computes ``A^H @ x``.

    Returns
    -------
    ndarray
        The requested matrix-vector product.
    """
    # callable() accepts any operator (functools.partial, callable objects),
    # whereas the original inspect.isfunction() test only matched plain
    # functions/lambdas.  ndarrays are not callable, so explicit matrices
    # still take the dot-product path.
    if callable(A):
        return A(x, 1) if mode == 1 else A(x, 2)
    if mode == 1:
        return np.dot(A, x)
    # Adjoint product A^H x, written directly instead of the original
    # equivalent-but-opaque conj(conj(x.T) @ A).T formulation.
    return np.dot(np.conj(A.T), x)
def spgl1(A, b, tau=None, sigma=None, x=None, options=None):
    """Solve basis pursuit, basis pursuit denoise (BPDN), or LASSO.

    Solves either

        (BPDN)  minimize ||x||_1      subject to ||Ax - b||_2 <= sigma, or
        (LASSO) minimize ||Ax - b||_2 subject to ||x||_1      <= tau

    with the spectral projected-gradient method of van den Berg and
    Friedlander, using Newton root-finding on tau when solving (BPDN).

    Parameters
    ----------
    A : ndarray or callable
        m-by-n matrix, or a function ``A(x, mode)`` returning ``A @ x``
        for mode == 1 and ``A^H @ x`` for mode == 2.
    b : ndarray
        m-vector of observations.
    tau : float, optional
        Nonnegative scalar for (LASSO).  Empty/None enables root-finding.
    sigma : float, optional
        Target residual norm for (BPDN).  If both tau and sigma are
        empty/None, (BPDN) is solved with sigma = 0 (basis pursuit).
    x : ndarray, optional
        Initial n-vector estimate; empty/None starts from zero
        (length n is then inferred from A or from A'b).
    options : dict, optional
        Parameters from spgSetParms; unset entries get default values.

    Returns
    -------
    x : ndarray
        Solution of the problem.
    r : ndarray
        Residual r = b - Ax.
    g : ndarray
        Gradient g = -A'r.
    info : dict
        Diagnostics: final 'tau', 'rNorm', 'rGap', 'gNorm', exit 'stat'
        (1 root found, 2/3 BP solution, 4 optimal LASSO, 5 too many
        iterations, 6 linesearch failure, 7 suboptimal BP, 8 matvec
        limit, 9 active-set), iteration/product counters, and history.

    Notes
    -----
    Translated from the MATLAB SPGL1 package, Copyright (C) 2007 Ewout
    van den Berg and Michael P. Friedlander, released under the GNU
    Lesser General Public License v2.1+.  Python translation by David
    Relyea.  See spg_bp, spg_bpdn and spg_lasso for convenience wrappers.
    """
    # Normalize mutable/empty defaults -- callers may pass [] or None.
    if options is None:
        options = {}
    Aprod = lambda xx, mode: Aprodprelambda(A, xx, mode)
    m = np.size(b)

    # Decide between single-tau (LASSO) mode and root-finding (BPDN) mode.
    if not tau and not sigma:
        tau = 0
        sigma = 0
        singleTau = False
    elif not sigma:  # tau given, sigma empty: plain LASSO.
        # BUG FIX: sigma used to be left as []/None here, which made the
        # scalar comparisons below (bNorm <= sigma, rNorm - sigma) raise
        # TypeError under Python 3.
        sigma = 0
        singleTau = True
    else:
        if not tau:
            tau = 0
        singleTau = False

    options = spgSetParms(options, spgSetParms({'iterations': 10 * m}))
    maxIts      = options['iterations']
    nPrevVals   = options['nPrevVals']
    bpTol       = options['bpTol']
    lsTol       = options['lsTol']
    optTol      = options['optTol']
    decTol      = options['decTol']
    stepMin     = options['stepMin']
    stepMax     = options['stepMax']
    activeSetIt = options['activeSetIt']
    subspaceMin = options['subspaceMin']
    maxMatvec   = max(3, options['maxMatvec'])  # matvec limit (signalling not yet ported)
    weights     = options['weights']
    maxLineErrors = 10     # Maximum number of line-search failures.
    pivTol        = 1e-12  # Threshold for significant Newton step.

    # ------------------------------------------------------------------
    # Initialize local variables.
    # ------------------------------------------------------------------
    iterr       = 0
    itnTotLSQR  = 0                             # Total SPGL1 and LSQR iterations.
    nProdA      = 0
    nProdAt     = 0
    lastFv      = -np.inf * np.ones(nPrevVals)  # Last nPrevVals function values.
    nLineTot    = 0                             # Total no. of linesearch steps.
    printTau    = False
    nNewton     = 0
    bNorm       = np.linalg.norm(b)
    stat        = False
    timeProject = 0
    timeMatProd = 0
    nnziterr    = 0                             # No. of iterations with fixed pattern.
    nnzIdx      = []                            # Active-set indicator.
    subspace    = False                         # Did subspace min in current itn.
    stepG       = 1                             # Step length for projected gradient.
    testUpdateTau = 0                           # Previous step did not update tau.

    # Determine initial x, vector length n, and whether the problem is complex.
    from inspect import isfunction
    explicit = not isfunction(A)
    # BUG FIX: 'x == []' is an ambiguous comparison when x is an ndarray.
    if x is None or np.size(x) == 0:
        if explicit:
            n = np.shape(A)[1]
            realx = np.lib.isreal(A).all() and np.lib.isreal(b).all()
        else:
            x = Aprod(b, 2)
            n = np.size(x)
            # BUG FIX: the operator branches used to call isreal(A) on a
            # *function*; test the computed vector instead.
            realx = np.lib.isreal(x).all() and np.lib.isreal(b).all()
        x = np.zeros(n)
    else:
        n = np.size(x)
        realx = np.lib.isreal(x).all() and np.lib.isreal(b).all()
    if explicit:
        realx = realx and np.lib.isreal(A).all()

    # Override options when options['iscomplex'] flag is set.
    if not np.isnan(options['iscomplex']):
        realx = options['iscomplex'] == 0

    # Check that all weights (if any) are finite and strictly positive.
    # (Number-of-weights check removed upstream for MMV support.)
    if weights is not None:
        if not np.isfinite(weights).all():
            print('SPGL1 ERROR: Entries in options.weights must be finite')
        if np.any(weights <= 0):
            print('SPGL1 ERROR: Entries in options.weights must be strictly positive')
    else:
        weights = 1

    # Quick exit if sigma >= ||b||.  Set tau = 0 to short-circuit the loop.
    if bNorm <= sigma:
        print('W: sigma >= ||b||.  Exact solution is x = 0.')
        tau = 0
        singleTau = True

    # Do not do subspace minimization if x is complex.
    if not realx and subspaceMin:
        print('W: Subspace minimization disabled when variables are complex.')
        subspaceMin = False

    # Pre-allocate iteration info vectors.
    xNorm1  = np.zeros(min(maxIts, 10000))
    rNorm2  = np.zeros(min(maxIts, 10000))
    lambdaa = np.zeros(min(maxIts, 10000))

    # Exit conditions (constants).
    EXIT_ROOT_FOUND    = 1
    EXIT_BPSOL_FOUND   = 2
    EXIT_LEAST_SQUARES = 3
    EXIT_OPTIMAL       = 4
    EXIT_iterrATIONS   = 5
    EXIT_LINE_ERROR    = 6
    EXIT_SUBOPTIMAL_BP = 7
    EXIT_MATVEC_LIMIT  = 8
    EXIT_ACTIVE_SET    = 9

    # Project the starting point and evaluate function and gradient.
    spglproject = options['project']
    x = spglproject(x, weights, tau)
    r = b - Aprod(x, 1)                    # r = b - Ax
    g = - Aprod(r, 2)                      # g = -A'r
    f = abs(np.dot(np.conj(r), r)) / 2.    # f = ||r||^2 / 2

    # Required for nonmonotone strategy.
    lastFv[0] = f.copy()
    fBest     = f.copy()
    xBest     = x.copy()
    fOld      = f.copy()

    # Compute projected gradient direction and initial steplength.
    dx = spglproject(x - g, weights, tau) - x
    dxNorm = np.linalg.norm(dx, np.inf)
    if dxNorm < (1. / stepMax):
        gStep = stepMax
    else:
        gStep = min(stepMax, max(stepMin, 1. / dxNorm))

    # ------------------------------------------------------------------
    # MAIN LOOP.
    # ------------------------------------------------------------------
    while 1:
        # --------------------------------------------------------------
        # Test exit conditions.
        # --------------------------------------------------------------
        gNorm = options['dual_norm'](-g, weights)
        rNorm = np.linalg.norm(r)
        gap   = np.dot(np.conj(r), r - b) + tau * gNorm
        rGap  = abs(gap) / max(1., f)      # Relative duality gap.
        aError1 = rNorm - sigma
        aError2 = f - sigma ** 2. / 2.
        rError1 = abs(aError1) / max(1., rNorm)
        rError2 = abs(aError2) / max(1., f)

        # Count number of consecutive iterations with identical support.
        nnzOld = nnzIdx
        nnzX, nnzG, nnzIdx, nnzDiff = activeVars(x, g, nnzIdx, options)
        if nnzDiff:
            nnziterr = 0
        else:
            nnziterr = nnziterr + 1
            if nnziterr + 1 >= activeSetIt:
                stat = EXIT_ACTIVE_SET

        if singleTau:
            # Single tau: check if we are optimal.  The second condition
            # guards against large tau.
            if rGap <= optTol or rNorm < optTol * bNorm:
                stat = EXIT_OPTIMAL
        else:
            # Multiple tau: check if found root and/or if tau needs updating.
            # (Large tau could mean large rGap even near an LS solution,
            # so the LS test sits outside the rGap branch.)
            if gNorm <= lsTol * rNorm:
                stat = EXIT_LEAST_SQUARES
            if rGap <= max(optTol, rError2) or rError1 <= optTol:
                # The problem is nearly optimal for the current tau;
                # check optimality of the current root.
                if rNorm <= sigma:
                    stat = EXIT_SUBOPTIMAL_BP   # Found suboptimal BP sol.
                if rError1 <= optTol:
                    stat = EXIT_ROOT_FOUND      # Found approx root.
                if rNorm <= bpTol * bNorm:
                    stat = EXIT_BPSOL_FOUND     # Resid minimzd -> BP sol.
            testRelChange1 = (abs(f - fOld) <= decTol * f)
            testRelChange2 = (abs(f - fOld) <= 1e-1 * f * (abs(rNorm - sigma)))
            testUpdateTau = ((testRelChange1 and rNorm > 2 * sigma) or
                            (testRelChange2 and rNorm <= 2 * sigma)) and \
                            not stat and not testUpdateTau
            if testUpdateTau:
                # Newton update of tau.
                tauOld = np.copy(tau)
                tau    = max(0, tau + (rNorm * aError1) / gNorm)
                nNewton = nNewton + 1
                printTau = abs(tauOld - tau) >= 1e-6 * tau  # For log only.
                if tau < tauOld:
                    # The one-norm ball has decreased; project the next
                    # iterate to keep it feasible.
                    x = spglproject(x, weights, tau)

        # Too many iterations and not converged.
        if not stat and iterr + 1 >= maxIts:
            stat = EXIT_iterrATIONS

        printTau = False
        subspace = False
        # Update history info (disabled in the original translation; the
        # info['xNorm1'/'rNorm2'/'lambdaa'] slices therefore return zeros).
        # xNorm1[iterr]  = options['primal_norm'](x, weights)
        # rNorm2[iterr]  = rNorm
        # lambdaa[iterr] = gNorm
        if stat:
            break

        # ==============================================================
        # Iterations begin here.
        # ==============================================================
        iterr = iterr + 1
        xOld = x.copy()
        fOld = f.copy()
        gOld = g.copy()
        rOld = r.copy()
        try:
            # ----------------------------------------------------------
            # Projected gradient step and linesearch.
            # ----------------------------------------------------------
            f, x, r, nLine, stepG, lnErr = \
                spgLineCurvy(x, gStep * g, max(lastFv), Aprod, b,
                             spglproject, weights, tau)
            nLineTot = nLineTot + nLine
            if lnErr:
                # Projected backtrack failed.  Retry with feasible
                # direction linesearch.
                x = xOld.copy()
                f = fOld.copy()
                dx = spglproject(x - gStep * g, weights, tau) - x
                gtd = np.dot(np.conj(g), dx)
                f, x, r, nLine, lnErr = spgLine(f, x, dx, gtd, max(lastFv), Aprod, b)
                nLineTot = nLineTot + nLine
            if lnErr:
                # Failed again.  Revert to previous iterates and damp
                # the max Barzilai-Borwein step.
                x = xOld
                f = fOld
                if maxLineErrors <= 0:
                    stat = EXIT_LINE_ERROR
                else:
                    stepMax = stepMax / 10.
                    print("W: Linesearch failed with error " + str(lnErr) +
                          "Damping max BB scaling to " + str(stepMax))
                    maxLineErrors = maxLineErrors - 1

            # ----------------------------------------------------------
            # Subspace minimization (only if active-set change is small).
            # ----------------------------------------------------------
            doSubspaceMin = False
            if subspaceMin:
                g = - Aprod(r, 2)
                nnzX, nnzG, nnzIdx, nnzDiff = activeVars(x, g, nnzOld, options)
                if not nnzDiff:
                    # Fewer LSQR iterations when gradient support disagrees.
                    if nnzX == nnzG:
                        itnMaxLSQR = 20
                    else:
                        itnMaxLSQR = 5
                    nnzIdx = abs(x) >= optTol
                    doSubspaceMin = True
            if doSubspaceMin:
                # LSQR parameters.
                damp     = 1e-5
                aTol     = 1e-1
                bTol     = 1e-1
                conLim   = 1e12
                showLSQR = 0.
                ebar  = np.sign(x[nnzIdx])
                nebar = np.size(ebar)
                Sprod = lambda y, mode: LSQRprod(Aprod, nnzIdx, ebar, n, y, mode)
                dxbar, istop, itnLSQR = \
                    lsqr(m, nebar, Sprod, r, damp, aTol, bTol, conLim,
                         itnMaxLSQR, showLSQR)
                itnTotLSQR = itnTotLSQR + itnLSQR
                if istop != 4:  # LSQR successful: take the subspace step.
                    # Push dx back into full space: dx = Z dx.
                    # BUG FIX: this used an undefined name 'dot' and
                    # multiplied dxbar; the MATLAB original is
                    # dxbar - (1/nebar)*(ebar'*dxbar)*ebar.
                    dx = np.zeros(n)
                    dx[nnzIdx] = dxbar - (1. / nebar) * np.dot(np.conj(ebar), dxbar) * ebar
                    # Find largest step to a change in sign.
                    # BUG FIX: elementwise masks need '&' (boolean 'and'
                    # raises on arrays) and 'Inf' was an undefined name.
                    block1 = nnzIdx & (x < 0) & (dx > +pivTol)
                    block2 = nnzIdx & (x > 0) & (dx < -pivTol)
                    alpha1 = np.inf
                    alpha2 = np.inf
                    if np.any(block1):
                        alpha1 = np.min(-x[block1] / dx[block1])
                    if np.any(block2):
                        alpha2 = np.min(-x[block2] / dx[block2])
                    alpha = min([1, alpha1, alpha2])
                    if alpha < 0:
                        print('ERROR: SPGL1: ALPHA LESS THAN ZERO')
                    if np.dot(np.conj(ebar.T), dx[nnzIdx]) > optTol:
                        print('ERROR: SPGL1: THING LESS THAN THING')
                    # Update variables.
                    x = x + alpha * dx
                    r = b - Aprod(x, 1)
                    f = abs(np.dot(np.conj(r), r)) / 2.
                    subspace = True
            if options['primal_norm'](x, weights) > tau + optTol:
                print('ERROR: SPGL1: PRIMAL NORM OUT OF BOUNDS')

            # ----------------------------------------------------------
            # Update gradient and compute new Barzilai-Borwein scaling.
            # ----------------------------------------------------------
            if not lnErr:
                g = - Aprod(r, 2)
                s = x - xOld
                y = g - gOld
                sts = np.dot(np.conj(s), s)
                sty = np.dot(np.conj(s), y)
                if sty <= 0:
                    gStep = stepMax
                else:
                    gStep = min(stepMax, max(stepMin, sts / sty))
            else:
                gStep = min(stepMax, gStep)
        except ValueError:
            # Detect matrix-vector multiply limit error.  NOTE(review):
            # the SPGL1:MaximumMatvec signalling of the MATLAB original
            # (restore old iterate, stat = EXIT_MATVEC_LIMIT, break) has
            # not been ported yet, so the error is only reported.
            print('MAJOR ERROR - I NEED TO LEARN TO THROW ERRORS')
            pass

        # --------------------------------------------------------------
        # Update function history.
        # --------------------------------------------------------------
        if singleTau or f > sigma ** 2 / 2.:  # Do not update if superoptimal.
            lastFv[np.mod(iterr, nPrevVals)] = f.copy()
            if fBest > f:
                fBest = f.copy()
                xBest = x.copy()

    # Restore best solution (only if solving a single problem).
    if singleTau and f > fBest:
        rNorm = np.sqrt(2. * fBest)
        print('Restoring best iterrate to objective ' + str(rNorm))
        x = xBest.copy()
        r = b - Aprod(x, 1)
        g = - Aprod(r, 2)
        # BUG FIX: options is a dict; attribute access raised AttributeError.
        gNorm = options['dual_norm'](g, weights)
        rNorm = np.linalg.norm(r)

    # Final cleanup before exit.
    info = {}
    info['tau']         = tau
    info['rNorm']       = rNorm
    info['rGap']        = rGap
    info['gNorm']       = gNorm
    info['stat']        = stat
    info['iterr']       = iterr
    info['nProdA']      = nProdA
    info['nProdAt']     = nProdAt
    info['nNewton']     = nNewton
    info['timeProject'] = timeProject
    info['timeMatProd'] = timeMatProd
    info['itnLSQR']     = itnTotLSQR
    info['options']     = options
    info['xNorm1']      = xNorm1[0:iterr]
    info['rNorm2']      = rNorm2[0:iterr]
    info['lambdaa']     = lambdaa[0:iterr]
    return x, r, g, info
def spg_bp(A, b, options=None):
    """Solve the basis pursuit (BP) problem

        (BP) minimize ||x||_1 subject to Ax = b,

    where A is an m-by-n matrix (or a function handle ``A(w, mode)``
    returning ``A @ w`` for mode == 1 and ``A' @ w`` for mode == 2) and
    b is an m-vector.

    Parameters
    ----------
    A : ndarray or callable
        Explicit matrix or two-mode operator as described above.
    b : ndarray
        Right-hand-side m-vector.
    options : dict, optional
        Options set via spgSetParms; None uses all defaults.

    Returns
    -------
    x : ndarray
        Solution of the BP problem.
    r : ndarray
        Residual r = b - Ax (should be small).
    g : ndarray
        Objective gradient g = A'r.
    info : dict
        Solver diagnostics; see spgl1.

    See also spgl1, spgSetParms, spg_bpdn, spg_lasso.
    Copyright 2008, Ewout van den Berg and Michael P. Friedlander.
    """
    # BUG FIX: 'options={}' was a shared mutable default argument.
    if options is None:
        options = {}
    # BP is the sigma = 0 instance of BPDN, solved by root-finding on tau.
    sigma = 0
    tau = 0
    x0 = []
    x, r, g, info = spgl1(A, b, tau, sigma, x0, options)
    return x, r, g, info
def spg_bpdn(A, b, sigma, options=None):
    """Solve the basis pursuit denoise (BPDN) problem

        (BPDN) minimize ||x||_1 subject to ||Ax - b||_2 <= sigma,

    where A is an m-by-n matrix (or a function handle ``A(w, mode)``
    returning ``A @ w`` for mode == 1 and ``A' @ w`` for mode == 2),
    b is an m-vector, and sigma is a nonnegative scalar.  If sigma is 0
    (or empty) the basis pursuit problem Ax = b is solved instead.

    Parameters
    ----------
    A : ndarray or callable
        Explicit matrix or two-mode operator as described above.
    b : ndarray
        Right-hand-side m-vector.
    sigma : float
        Nonnegative residual-norm target.
    options : dict, optional
        Options set via spgSetParms; None uses all defaults.

    Returns
    -------
    x, r, g, info
        Solution, residual r = b - Ax, gradient g = A'r, and solver
        diagnostics; see spgl1.

    See also spgl1, spgSetParms, spg_bp, spg_lasso.
    Copyright 2008, Ewout van den Berg and Michael P. Friedlander.
    """
    # BUG FIX: 'options={}' was a shared mutable default argument.
    if options is None:
        options = {}
    # Root-finding mode: tau = 0 lets spgl1 search for the tau matching sigma.
    tau = 0
    x0 = []
    return spgl1(A, b, tau, sigma, x0, options)
def spg_lasso(A, b, tau, options=None):
    """Solve the LASSO problem

        (LASSO) minimize ||Ax - b||_2 subject to ||x||_1 <= tau,

    where A is an m-by-n matrix (or a function handle ``A(w, mode)``
    returning ``A @ w`` for mode == 1 and ``A' @ w`` for mode == 2),
    b is an m-vector, and tau is a nonnegative scalar.

    Parameters
    ----------
    A : ndarray or callable
        Explicit matrix or two-mode operator as described above.
    b : ndarray
        Right-hand-side m-vector.
    tau : float
        Nonnegative one-norm budget.
    options : dict, optional
        Options set via spgSetParms; None uses all defaults.

    Returns
    -------
    x, r, g, info
        Solution, residual r = b - Ax, gradient g = A'r, and solver
        diagnostics; see spgl1.

    See also spgl1, spgSetParms, spg_bp, spg_bpdn.
    Copyright 2008, Ewout van den Berg and Michael P. Friedlander.
    """
    # BUG FIX: 'options={}' was a shared mutable default argument.
    if options is None:
        options = {}
    # Single-tau mode: sigma = 0 and a fixed tau solve the plain LASSO.
    sigma = 0
    x0 = []
    return spgl1(A, b, tau, sigma, x0, options)