-
Notifications
You must be signed in to change notification settings - Fork 1
/
techreport.bib
1603 lines (1498 loc) · 90.3 KB
/
techreport.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
% This file was created with JabRef 2.9.
% Encoding: MacRoman
%-----2023-----%
%-----2022-----%
@TECHREPORT{orozco2022MIDLmei,
author = {Rafael Orozco and Mathias Louboutin and Felix J. Herrmann},
title = {Memory Efficient Invertible Neural Networks for {3D} Photoacoustic Imaging},
year = {2022},
month = apr,
number = {TR-CSE-2022-2},
institution = {Georgia Institute of Technology},
abstract = {Photoacoustic imaging (PAI) can image high-resolution structures of
clinical interest such as vascularity in cancerous tumor monitoring. When
imaging human subjects, geometric restrictions force limited-view data
retrieval causing imaging artifacts. Iterative physical model based
approaches reduce artifacts but require prohibitively time consuming PDE
solves. Machine learning (ML) has accelerated PAI by combining physical
models and learned networks. However, the depth and overall power of ML
methods is limited by memory intensive training. We propose using invertible
neural networks (INNs) to alleviate memory pressure. We demonstrate INNs can
image 3D photoacoustic volumes in the setting of limited-view, noisy, and
subsampled data. The frugal constant memory usage of INNs enables us to train
an arbitrary depth of learned layers on a consumer GPU with 16GB RAM.},
keywords = {Invertible Networks, Medical Imaging, Physics and Machine Learning Hybrid, Photoacoustic Imaging},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2022/orozco2022MIDLmei/midl_2022.html}
}
%-----2021-----%
@TECHREPORT{louboutin2021NIPSmcte,
author = {Mathias Louboutin and Ali Siahkoohi and Rongrong Wang and Felix J. Herrmann},
title = {Low-memory stochastic backpropagation with multi-channel randomized trace estimation},
year = {2021},
month = jun,
number = {TR-CSE-2021-1},
institution = {Georgia Institute of Technology},
abstract = {Thanks to the combination of state-of-the-art accelerators and highly
optimized open software frameworks, there has been tremendous progress in
the performance of deep neural networks. While these developments have
been responsible for many breakthroughs, progress towards solving large-scale
problems, such as video encoding and semantic segmentation in 3D,
is hampered because access to on-premise memory is often limited.
Instead of relying on (optimal) checkpointing or invertibility of the
network layers---to recover the activations during backpropagation---we
propose to approximate the gradient of convolutional layers in neural
networks with a multi-channel randomized trace estimation technique. Compared
to other methods, this approach is simple, amenable to analyses, and leads to a
greatly reduced memory footprint. Even though the randomized trace
estimation introduces stochasticity during training, we argue that this
is of little consequence as long as the induced errors are of the same
order as errors in the gradient due to the use of stochastic gradient
descent. We discuss the performance of networks trained with stochastic
backpropagation and how the error can be controlled while maximizing
memory usage and minimizing computational overhead.},
keywords = {HPC, Machine Learning, Randomized Linear Algebra, Convolutions, Low Memory},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2021/louboutin2021NIPSmcte/louboutin2021NIPSmcte.pdf},
software = {https://github.com/slimgroup/XConv}
}
%-----2020-----%
@TECHREPORT{sharan2020lsh,
author = {Shashin Sharan and Yijun Zhang and Oscar Lopez and Felix J. Herrmann},
title = {Large scale high-frequency wavefield reconstruction with recursively weighted matrix factorizations},
year = {2020},
month = oct,
number = {TR-CSE-2020-4},
institution = {Georgia Institute of Technology},
abstract = {Acquiring seismic data on a regular periodic fine grid is challenging. By exploiting the low-rank approximation property of fully sampled seismic data in some transform domain, low-rank matrix completion offers a scalable way to reconstruct seismic data on a regular periodic fine grid from coarsely randomly sampled data acquired in the field. While wavefield reconstruction have been applied successfully at the lower end of the spectrum, its performance deteriorates at the higher frequencies where the low-rank assumption no longer holds rendering this type of wavefield reconstruction ineffective in situations where high resolution images are desired. We overcome this shortcoming by exploiting similarities between adjacent frequency slices explicitly. During low-rank matrix factorization, these similarities translate to alignment of subspaces of the factors, a notion we propose to employ as we reconstruct monochromatic frequency slices recursively starting at the low frequencies. While this idea is relatively simple in its core, to turn this recent insight into a successful scalable wavefield reconstruction scheme for 3D seismic requires a number of important steps. First, we need to move the weighting matrices, which encapsulate the prior information from adjacent frequency slices, from the objective to the data misfit constraint. This move considerably improves the performance of the weighted low-rank matrix factorization on which our wavefield reconstructions is based. Secondly, we introduce approximations that allow us to decouple computations on a row-by-row and column-by-column basis, which in turn allow to parallelize the alternating optimization on which our low-rank factorization relies. The combination of weighting and decoupling leads to a computationally feasible full-azimuth wavefield reconstruction scheme that scales to industry-scale problem sizes. 
We demonstrate the performance of the proposed parallel algorithm on a 2D field data and on a 3D synthetic dataset. In both cases our approach produces high-fidelity broadband wavefield reconstructions from severely subsampled data.},
keywords = {5D reconstruction, compressed sensing, frequency-domain, parallel, signal processing},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2020/sharan2020lsh/sharan2020lsh.html}
}
@TECHREPORT{siahkoohi2020TRfuqf,
author = {Ali Siahkoohi and Gabrio Rizzuti and Philipp A. Witte and Felix J. Herrmann},
title = {Faster Uncertainty Quantification for Inverse Problems with Conditional Normalizing Flows},
year = {2020},
month = jul,
number = {TR-CSE-2020-2},
institution = {Georgia Institute of Technology},
abstract = {In inverse problems, we often have access to data consisting of
paired samples $(x,y)\sim p_{X,Y}(x,y)$ where $y$ are partial observations of
a physical system, and $x$ represents the unknowns of the problem. Under
these circumstances, we can employ supervised training to learn a solution
$x$ and its uncertainty from the observations $y$. We refer to this problem
as the "supervised" case. However, the data $y\sim p_{Y}(y)$ collected at one
point could be distributed differently than observations $y'\sim p_{Y}'(y')$,
relevant for a current set of problems. In the context of Bayesian inference,
we propose a two-step scheme, which makes use of normalizing flows and joint
data to train a conditional generator $q_{\theta}(x|y)$ to approximate the
target posterior density $p_{X|Y}(x|y)$. Additionally, this preliminary phase
provides a density function $q_{\theta}(x|y)$, which can be recast as a prior
for the "unsupervised" problem, e.g.~when only the observations $y'\sim
p_{Y}'(y')$, a likelihood model $y'|x$, and a prior on $x'$ are known. We
then train another invertible generator with output density $q'_{\phi}(x|y')$
specifically for $y'$, allowing us to sample from the posterior
$p_{X|Y}'(x|y')$. We present some synthetic results that demonstrate
considerable training speedup when reusing the pretrained network
$q_{\theta}(x|y')$ as a warm start or preconditioning for approximating
$p_{X|Y}'(x|y')$, instead of learning from scratch. This training modality
can be interpreted as an instance of transfer learning. This result is
particularly relevant for large-scale inverse problems that employ expensive
numerical simulations.},
keywords = {deep learning, invertible networks, uncertainty quantification},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2020/siahkoohi2020TRfuqf/siahkoohi2020TRfuqf.html}
}
@TECHREPORT{louboutin2020SEGtwri,
author = {Mathias Louboutin and Gabrio Rizzuti and Felix J. Herrmann},
title = {Time-domain Wavefield Reconstruction Inversion in a {TTI} medium},
year = {2020},
month = apr,
number = {TR-CSE-2020-1},
institution = {Georgia Institute of Technology},
abstract = {We introduce a generalization of time-domain wavefield
reconstruction inversion to anisotropic acoustic modeling. Wavefield
reconstruction inversion has been extensively researched in recent years for
its ability to mitigate cycle skipping. The original method was formulated in
the frequency domain with acoustic isotropic physics. However,
frequency-domain modeling requires sophisticated iterative solvers that are
difficult to scale to industrial-size problems and more realistic physical
assumptions, such as tilted transverse isotropy, object of this study. The
work presented here is based on a recently proposed dual formulation of
wavefield reconstruction inversion, which allows time-domain propagators that
are suitable to both large scales and more accurate physics.},
keywords = {FWI, WRI, anisotropy, TTI},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2020/louboutin2020SEGtwri/louboutin2020SEGtwri.html},
url2 = {https://arxiv.org/pdf/2004.07355.pdf}
}
@TECHREPORT{louboutin2020SCsta,
author = {Mathias Louboutin and Fabio Luporini and Philipp A. Witte and Rhodri
Nelson and George Bisbas and Jan Thorbecke and Felix J. Herrmann and Gerard
Gorman},
title = {Scaling through abstractions---high-performance vectorial wave
simulations for seismic inversion with {Devito}},
year = {2020},
month = apr,
number = {TR-CSE-2020-3},
institution = {Georgia Institute of Technology},
abstract = {[Devito] is an open-source Python project based on domain-specific
language and compiler technology. Driven by the requirements of rapid HPC
applications development in exploration seismology, the language and compiler
have evolved significantly since inception. Sophisticated boundary
conditions, tensor contractions, sparse operations and features such as
staggered grids and sub-domains are all supported; operators of essentially
arbitrary complexity can be generated. To accommodate this flexibility whilst
ensuring performance, data dependency analysis is utilized to schedule loops
and detect computational-properties such as parallelism. In this article, the
generation and simulation of MPI-parallel propagators (along with their
adjoints) for the pseudo-acoustic wave-equation in tilted transverse
isotropic media and the elastic wave-equation are presented. Simulations are
carried out on industry scale synthetic models in a HPC Cloud system and
reach a performance of 28TFLOP/s, hence demonstrating Devito's suitability
for production-grade seismic inversion problems.},
keywords = {HPC, Devito, finite-difference, large-scale, RTM, elastic, TTI},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2020/louboutin2020SCsta/louboutin2020SCsta.html}
}
%-----2019-----%
@TECHREPORT{siahkoohi2019TRnna,
author = {Ali Siahkoohi and Mathias Louboutin and Felix J. Herrmann},
title = {Neural network augmented wave-equation simulation},
year = {2019},
month = sep,
number = {TR-CSE-2019-1},
institution = {Georgia Institute of Technology},
abstract = {Accurate forward modeling is important for solving inverse
problems. An inaccurate wave-equation simulation, as a forward operator, will
offset the results obtained via inversion. In this work, we consider the case
where we deal with incomplete physics. One proxy of incomplete physics is an
inaccurate discretization of Laplacian in simulation of wave equation via
finite-difference method. We exploit intrinsic one-to-one similarities
between timestepping algorithm with Convolutional Neural Networks (CNNs), and
propose to intersperse CNNs between low-fidelity timesteps. Augmenting neural
networks with low-fidelity timestepping algorithms may allow us to take large
timesteps while limiting the numerical dispersion artifacts. While simulating
the wave-equation with low-fidelity timestepping algorithm, by correcting the
wavefield several time during propagation, we hope to limit the numerical
dispersion artifact introduced by a poor discretization of the Laplacian. As
a proof of concept, we demonstrate this principle by correcting for numerical
dispersion by keeping the velocity model fixed, and varying the source
locations to generate training and testing pairs for our supervised learning
algorithm.},
keywords = {wave equation, deep learning, finite difference},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2019/siahkoohi2019TRnna/siahkoohi2019TRnna.html},
url2 = {https://arxiv.org/pdf/1910.00925.pdf}
}
%-----2017-----%
@TECHREPORT{louboutin2016OGHPCocp,
author = {Mathias Louboutin and Gerard Gorman and Felix J. Herrmann},
title = {Optimizing the computational performance of time-domain modelling---leveraging multiple right-hand-sides},
year = {2017},
number = {TR-EOAS-2017-2},
institution = {UBC},
abstract = {Exploration geophysics heavily relies upon fast solvers for the
wave-equation and its adjoint. The main computational cost of a wave-equation
solver is to compute the Laplacian, or more complex finite-difference
operators, at every time step. The performance of many discretizations is
limited by the relatively low operational intensity (number of floating point
operations divided by memory traffic) of the finite-difference stencil.
Solving the wave-equation for multiple sources/right-hand-sides (RHSs) at
once mitigates this problem by increasing the operational intensity. This is
implemented by rewriting the classical matrix-vector product into a
matrix-matrix product where each column of the second matrix represent the
solution wavefield for each given source. This minor modification to the
solver is shown to achieve a 2-4 times speedup compared to a single source
solver. We concentrate in this paper on acoustic modelling, but our approach
can easily be extended to anisotropic or elastic cases for both forward and
adjoint modelling.},
keywords = {finite differences, HPC, modelling, time domain},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2017/louboutin2016OGHPCocp/louboutin2016OGHPCocp.pdf}
}
@TECHREPORT{witte2017TRcls,
author = {Philipp A. Witte and Mengmeng Yang and Felix J. Herrmann},
title = {Compressive least-squares migration with source estimation},
year = {2017},
number = {TR-EOAS-2017-3},
institution = {UBC},
abstract = {Least-squares reverse-time migration is a powerful approach for
true-amplitude seismic imaging of complex geological structures. The
successful application of this method is hindered by its exceedingly large
computational cost and required prior knowledge of the generally unknown
source wavelet. We address these problems by introducing an algorithm for
low-cost sparsity-promoting least-squares migration with source estimation.
We adapt a recent algorithm from sparse optimization, which allows to work
with randomized subsets of shots during each iteration of least-squares
migration, while still converging to an artifact-free solution. We modify the
algorithm to incorporate on-the-fly source estimation through variable
projection, which lets us estimate the wavelet without additional PDE solves.
The algorithm is easy to implement and allows imaging at a fraction of the
cost of conventional least squares reverse-time migration, requiring only
around two passes trough the data, making the method feasible for
large-scale imaging problems with unknown source wavelets.},
keywords = {LSRTM, source estimation, sparsity, migration, time domain},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2017/witte2017TRcls/witte2017TRcls.html}
}
@TECHREPORT{witte2016OGHPClst,
author = {Philipp A. Witte and Mathias Louboutin and Gerard Gorman and Felix J. Herrmann},
title = {A large-scale time-domain seismic modeling and inversion workflow in {Julia}},
year = {2017},
number = {TR-EOAS-2017-1},
institution = {UBC},
abstract = {We present our initial steps towards the development of
a large-scale seismic modeling workflow in Julia
that provides a framework for wave equation based
inversion methods like full waveform inversion or
least squares migration. Our framework is based on
Devito, a finite-difference domain-specific
language compiler that generates highly optimized
and parallel code. We develop a flexible workflow
that is based on abstract matrix-free linear
operators and enables developers to write code that
closely resembles the underlying math, while at the
same time leveraging highly optimized wave equation
solvers, allowing us to solve large-scale
three-dimensional inverse problems.},
keywords = {HPC, inversion, modelling},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2017/witte2016OGHPClst/witte2016OGHPClst.pdf}
}
%-----2016-----%
@TECHREPORT{louboutin2016SEGocp,
author = {Mathias Louboutin and Gerard Gorman and Felix J. Herrmann},
title = {Optimizing the computational performance and maintainability of time-domain modelling---leveraging multiple right-hand-sides},
year = {2016},
month = jun,
number = {TR-EOAS-2016-2},
institution = {UBC},
abstract = {Numerical solvers for the wave equation are a key
component of Full-Waveform Inversion (FWI) and
Reverse-Time Migration (RTM). The main computational
cost of a wave-equation solver stems from the
computation of the Laplacian at each time step. When
using a finite difference discretization this can be
characterized as a structured grid computation
within Colella's Seven Dwarfs. Independent of the
degree of parallelization the performance will be
limited by the relatively low operational intensity
(number of operations divided by memory traffic) of
finite-difference stencils, that is to say that the
method is memory bandwidth bound. For this reason
many developers have focused on porting their code
to platforms that have higher memory bandwidth, such
as GPU's, or put significant effort into highly
intrusive optimisations. However, these
optimisations rarely strike the right performance vs
productivity balance as the software becomes less
maintainable and extensible. By solving the wave
equation for multiple sources/right-hand-sides
(RHSs) at once, we overcome this problem arriving at
a time-stepping solver with higher operational
intensity. In essence, we arrive at this result by
turning the usual matrix-vector products into
matrix-matrix products where the first matrix
implements the discretized wave equation and each
column of the second matrix contain separate
wavefields for each given source. By making this
relatively minor change to the solver we readily
achieved a $\times{2}$ speedup. While we limit
ourselves to acoustic modeling, our approach can
easily be extended to the anisotropic or elastic
cases.},
keywords = {modelling, 3D, time domain},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2016/louboutin2016SEGocp/louboutin2016SEGocp.html}
}
@TECHREPORT{herrmann2016SLBors,
author = {Felix J. Herrmann},
title = {Overview research at the {SINBAD Consortium}},
year = {2016},
month = mar,
number = {TR-EOAS-2016-1},
institution = {UBC},
keywords = {presentation, SLIM},
note = {Presented at a seminar at Schlumberger Gould, Cambridge on March 17, 2016.},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2016/SLB/herrmann2016SLBors/herrmann2016SLBors.pdf}
}
%-----2015-----%
@TECHREPORT{kumar2015EAGElse,
author = {Rajiv Kumar and Ning Tu and Tristan van Leeuwen and Felix J. Herrmann},
title = {Least-squares extended imaging with surface-related multiples},
year = {2015},
month = jan,
number = {TR-EOAS-2015-1},
institution = {UBC},
abstract = {Common image gathers are used in building velocity
models, inverting for anisotropy parameters, and
analyzing reservoir attributes. Typically, only
primary reflections are used to form image gathers
as multiples can cause artifacts that interfere with
the events of interest. However, it has been shown
that multiples can actually provide extra
illumination of the subsurface, especially for
delineating the near-surface features. In this
paper, we aim to form common image gathers directly
from the data with surface related multiples by
applying concepts that have been used to
successfully deal with surface-related multiples in
imaging. We achieve this by effectively inverting an
extended migration operator. This results in
extended images with better near-surface
illumination that are free of artifacts that can
hamper velocity analysis. In addition, being able to
generate extended images directly from the total
data avoids the need for (time-consuming)
pre-processing. Synthetic examples on a layered
model show that the proposed formulation is
promising.},
keywords = {surface-related multiples, image gathers},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/kumar2015EAGElse/kumar2015EAGElse.html}
}
@TECHREPORT{kumar2015EAGEtjm,
author = {Rajiv Kumar and Haneet Wason and Felix J. Herrmann},
title = {Time-jittered marine acquisition: low-rank v/s sparsity},
year = {2015},
month = jan,
number = {TR-EOAS-2015-2},
institution = {UBC},
abstract = {Time-jittered simultaneous marine acquisition has been
recognized as an economic way of improving the
spatial sampling, and speedup acquisition, where a
single (or multiple) source vessel fires at --
jittered source locations and instances in time. It
has been shown in the past that this problem can be
setup as a -- compressed sensing problem, where
conventional seismic data is reconstructed from
blended data via a sparsity-promoting optimization
formulation. While the recovery quality of deblended
data is very good, the recovery process is
computationally very expensive. In this paper, we
present a computationally efficient
rank-minimization algorithm to deblend the seismic
data. The proposed algorithm is suitable for
large-scale seismic data, since it avoids SVD
computations and uses a low-rank factorized
formulation instead. Results are illustrated with
simulations of time-jittered marine acquisition,
which translates to jittered source locations for a
given speed of the source vessel, for a single
source vessel with two airgun arrays.},
keywords = {deblending, low-rank},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/kumar2015EAGEtjm/kumar2015EAGEtjm.html}
}
@TECHREPORT{lago2015EAGEtrg,
author = {Rafael Lago and Felix J. Herrmann},
title = {Towards a robust geometric multigrid scheme for {Helmholtz} equation},
year = {2015},
month = jan,
number = {TR-EOAS-2015-3},
institution = {UBC},
abstract = {We discuss an improvement of existing multigrid
techniques for the solution of the time harmonic
wave equation targeting application to seismic
inversion and imaging, using non-traditional
smoothing and coarse correction techniques, namely
the CGMN and CRMN methods. We aim at developing a
multigrid scheme to be used as a preconditioner for
FGMRES showing less sensitivity to changes in the
discretization of the operator. We compare this
multigrid scheme with recent developments in the
multigrid field obtaining very satisfactory
results. Our numerical experiments using SEG/EAGE
Overthrust velocity model show not only more
robustness when switching from a basic 7 points
stencil to a more compact 27 points stencil, but
also a considerable reduction in the number of
preconditioning steps required to attain
convergence, a result encouraging further
investigation.},
keywords = {Helmholtz, multigrid, CGMN, CRMN},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/lago2015EAGEtrg/lago2015EAGEtrg.pdf}
}
@TECHREPORT{peters2015EAGErwi,
author = {Bas Peters and Brendan R. Smithyman and Felix J. Herrmann},
title = {Regularizing waveform inversion by projection onto intersections of convex sets},
year = {2015},
month = jan,
number = {TR-EOAS-2015-4},
institution = {UBC},
abstract = {A framework is proposed for regularizing the waveform
inversion problem by projections onto intersections
of convex sets. Multiple pieces of prior information
about the geology are represented by multiple convex
sets, for example limits on the velocity or minimum
smoothness conditions on the model. The data-misfit
is then minimized, such that the estimated model is
always in the intersection of the convex
sets. Therefore, it is clear what properties the
estimated model will have at each iteration. This
approach does not require any quadratic penalties to
be used and thus avoids the known problems and
limitations of those types of penalties. It is shown
that by formulating waveform inversion as a
constrained problem, regularization ideas such as
Tikhonov regularization and gradient filtering can
be incorporated into one framework. The algorithm is
generally applicable, in the sense that it works
with any (differentiable) objective function,
several gradient and quasi-Newton based solvers and
does not require significant additional
computation. The method is demonstrated on the
inversion of very noisy synthetic data and vertical
seismic profiling field data.},
keywords = {waveform inversion, regularization, convex sets},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/peters2015EAGErwi/peters2015EAGErwi.html}
}
@TECHREPORT{esser2015tvwri,
author = {Ernie Esser and Llu{\'\i}s Guasch and Tristan van Leeuwen and Aleksandr Y. Aravkin and Felix J. Herrmann},
title = {Total variation regularization strategies in full waveform inversion for improving robustness to noise, limited data and poor initializations},
year = {2015},
month = jun,
number = {TR-EOAS-2015-5},
institution = {UBC},
abstract = {We propose an extended full waveform inversion
formulation that includes convex constraints on the
model. In particular, we show how to simultaneously
constrain the total variation of the slowness
squared while enforcing bound constraints to keep it
within a physically realistic range. Synthetic
experiments show that including total variation
regularization can improve the recovery of a high
velocity perturbation to a smooth background model,
removing artifacts caused by noise and limited data.
Total variation-like constraints can make the
inversion results significantly more robust to a
poor initial model, leading to reasonable results in
some cases where unconstrained variants of the
method completely fail. Numerical results are
presented for portions of the SEG/EAGE salt model
and the 2004 BP velocity
benchmark. ***Disclaimer.*** *This technical report
is ongoing work (and posted as is except for the
addition of another author) of the late John "Ernie"
Esser (May 19, 1980 - March 8, 2015), who passed
away under tragic circumstances. We will work hard
to finalize and submit this work to the peer-review
literature.* Felix J. Herrmann},
keywords = {Wavefield Reconstruction Inversion, total-variation, hinge loss, cones constraints},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/esser2015tvwri/esser2015tvwri.html}
}
@TECHREPORT{witte2015TRoam,
author = {Philipp A. Witte and Mathias Louboutin and Felix J. Herrmann},
title = {Overview on anisotropic modeling and inversion},
year = {2015},
month = aug,
number = {TR-EOAS-2015-6},
institution = {UBC},
abstract = {This note provides an overview on strategies for
modeling and inversion with the anisotropic wave
equation. Since linear and non-linear inversion
methods like least squares RTM and Full Waveform
Inversion depend on matching observed field data
with synthetically modelled data, accounting for
anisotropy effects is necessary in order to
accurately match waveforms at long offsets and
propagation times. In this note, the two main
strategies for anisotropic modelling by solving
either a pseudo acoustic wave equation or a pure
quasi-P-wave equation are discussed and an inversion
workflow using the pure quasi-P-wave equation is
provided. In particular, we derive the exact adjoint
of the anisotropic forward modelling and Jacobian
operator and give a detailed description of their
implementation. The anisotropic FWI workflow is
tested on a synthetic data example.},
keywords = {full waveform inversion, anisotropy, modeling},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/witte2015TRoam/witte2015TRoam.html}
}
@TECHREPORT{peters2015SEGrwi,
author = {Bas Peters and Zhilong Fang and Brendan R. Smithyman and Felix J. Herrmann},
title = {Regularizing waveform inversion by projections onto convex sets --- application to the {2D} {Chevron} 2014 synthetic blind-test dataset},
year = {2015},
month = {06},
number = {TR-EOAS-2015-7},
institution = {UBC},
abstract = {A framework is proposed for regularizing the waveform
inversion problem by projections onto intersections
of convex sets. Multiple pieces of prior information
about the geology are represented by multiple convex
sets, for example limits on the velocity or minimum
smoothness conditions on the model. The data-misfit
is then minimized, such that the estimated model is
always in the intersection of the convex
sets. Therefore, it is clear what properties the
estimated model will have at each iteration. This
approach does not require any quadratic penalties to
be used and thus avoids the known problems and
limitations of those types of penalties. It is shown
that by formulating waveform inversion as a
constrained problem, regularization ideas such as
Tikhonov regularization and gradient filtering can
be incorporated into one framework. The algorithm is
generally applicable, in the sense that it works
with any (differentiable) objective function and
does not require significant additional
computation. The method is demonstrated on the
inversion of the 2D marine isotropic elastic
synthetic seismic benchmark by Chevron using an
acoustic modeling code. To highlight the effect of
the projections, we apply no data pre-processing.},
keywords = {SEG, Waveform inversion, regularization, projection, blind-test, Wavefield Reconstruction Inversion},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2015/peters2015SEGrwi/peters2015SEGrwi.html}
}
%-----2014-----%
@TECHREPORT{esser2014sln,
author = {Ernie Esser},
title = {Some lifting notes},
year = {2014},
month = {02},
number = {TR-EOAS-2014-1},
institution = {UBC},
keywords = {lifting, nonconvex quadratic problems, convex semidefinite programming, low rank},
note = {written on February 15, 2014},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/esser2014sln/esser2014sln.pdf}
}
@TECHREPORT{herrmann2014pmpde,
author = {Felix J. Herrmann and Tristan van Leeuwen},
title = {A penalty method for {PDE}-constrained optimization},
institution = {UBC},
year = {2014},
month = {10},
day = {30},
number = {WO 2014/172787},
type = {Patent},
yearfiled = {2014},
monthfiled = {04},
dayfiled = {23},
abstract = {The invention is directed to a computer-implemented
method for obtaining a physical model having
physical model parameters wherein solutions to one
or more partial-differential-equations (PDE's) are
calculated and wherein (i) an appropriate initial
model is selected, (ii) setup a system of equations
referred to as the data-augmented PDE for the field,
comprising the discretized PDE, the sampling
operator, the source function and the observed data,
and solve the data-augmented PDE in a suitable
manner to obtain a field that both satisfies the PDE
and fits the data to some degree, (iii) setup a
system of equations by using the PDE, the source
function and the field obtained in step (ii) and
solve this system of equations in a suitable manner
to obtain an update of the physical model parameters
and repeat steps (ii)-(iii) until a predetermined
stopping criterion is met.},
keywords = {penalty method, optimization, patent},
note = {(International publication date 30 October 2014. International publication number WO 2014/172787.)},
url = {https://slim.gatech.edu/Publications/Public/Patents/2014/herrmann2014pmpde/herrmann2014pmpde_WO2014172787.pdf},
url2 = {http://patentscope.wipo.int/search/en/WO2014172787}
}
@TECHREPORT{slim2014NSERCapp,
author = {Felix J. Herrmann},
title = {{NSERC} 2014 {DNOISE} application},
year = {2014},
number = {TR-EOAS-2014-7},
institution = {UBC},
abstract = {This current proposal describes a comprehensive
five-year continuation of our research project in
dynamic nonlinear optimization for imaging in
seismic exploration (DNOISE). DNOISE III—Exploration
Seismology in the Petascale Age builds on the proven
track record of our multidisciplinary research team
that conducts transformative research in the fields
of seismic-data acquisition, processing, and
wave-equation based inversion. The overarching goals
of the DNOISE series of projects can be simply
summarized as: ``How to image more deeply and with
more detail?" and ``How to do more with less data?"
Also, to help overcome the current substantial
challenges in the oil and gas industry, we maintain
this focus with more specific follow-up questions
such as: ``How can we control costs and remove
acquisition-related artifacts in 3D (time-lapse)
seismic data sets?" and ``How can we replace
conventional seismic data processing with
wave-equation based inversion, control computational
costs, assess uncertainties, extract reservoir
information and remove sensitivity to starting
models?" To answer these questions, we have
assembled an expanded cross-disciplinary research
team with backgrounds in scientific computing (SC),
machine learning (ML), compressive sensing (CS),
hardware design, and computational and observational
exploration seismology (ES). With this team, we will
continue to drive innovations in ES by utilizing our
unparalleled access to high-performance computing
(HPC), our expertise and experience in CS and
wave-equation based inversion (WEI) and our proven
abilities in incorporating our research findings
into practical scalable software of our inversion
solutions.},
keywords = {NSERC, DNOISE},
url = {https://slim.gatech.edu/Publications/Public/TechReport/NSERC/2014/DNOISEIII/crd.html}
}
@TECHREPORT{tschannen2014tdl,
author = {Valentin Tschannen and Zhilong Fang and Felix J. Herrmann},
title = {Time domain least squares migration and dimensionality reduction},
institution = {UBC},
year = {2014},
month = {06},
number = {TR-EOAS-2014-9},
abstract = {Least-squares seismic migration (LSM) is a wave equation based linearized inversion problem relying on the minimization of a least-squares misfit function, with respect to the medium perturbation, between recorded and modeled wavefields. Today’s challenges in Hydrocarbon exploration are to build high resolution images of more and more complicated geological reservoirs, which requires to handle very large systems of equations. The extreme size of the problem combined with the fact that it is ill-conditioned make LSM not yet feasible for industrial purposes. To overcome this ``curse of dimensionality'', dimension reduction and divide-and-conquer techniques that aim to decrease the computation time and the required memory, while conserving the image quality, have recently been developed. By borrowing ideas from stochastic optimization and compressive sensing, the imaging problem is reformulated as an L1-regularized, sparsity promoted LSM. The idea is to take advantage of the compressibility of the model perturbation in the curvelet domain and to work on series of smaller subproblems each involving a small randomized subset of data. We try two different subset sampling strategies, artificial randomized simultaneous sources experiments (``supershots'') and drawing sequential shots firing at random source locations. These subsets are changed after each subproblem is solved. In both cases we obtain good migration results at significantly reduced computational cost. Application of these methods to a complicated synthetic model yields encouraging results, underlining the usefulness of sparsity promotion and randomization in time stepping formulation.
},
keywords = {Wave equation migration, sparsity promotion, compressive sensing, stochastic optimization},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/tschannen2014tdl/tschannen2014tdl.pdf}
}
@TECHREPORT{esser2014SEGsgp,
author = {Ernie Esser and Tristan van Leeuwen and Aleksandr Y. Aravkin and Felix J. Herrmann},
title = {A scaled gradient projection method for total variation regularized full waveform inversion},
year = {2014},
month = {04},
number = {TR-EOAS-2014-2},
institution = {UBC},
abstract = {We propose an extended full waveform inversion
formulation that includes convex constraints on the
model. In particular, we show how to simultaneously
constrain the total variation of the slowness
squared while enforcing bound constraints to keep it
within a physically realistic range. Synthetic
experiments show that including total variation
regularization can improve the recovery of a high
velocity perturbation to a smooth background model.},
keywords = {full waveform inversion, convex constraints, total variation regularization},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/esser2014SEGsgp/esser2014SEGsgp.html}
}
@TECHREPORT{zfang2014SEGsqn,
author = {Zhilong Fang and Felix J. Herrmann},
title = {A stochastic {quasi-Newton} {McMC} method for uncertainty quantification of full-waveform inversion},
year = {2014},
month = {04},
number = {TR-EOAS-2014-6},
institution = {UBC},
abstract = {In this work we propose a stochastic quasi-Newton Markov
chain Monte Carlo (McMC) method to quantify the
uncertainty of full-waveform inversion (FWI). We
formulate the uncertainty quantification problem in
the framework of the Bayesian inference, which
formulates the posterior probability as the
conditional probability of the model given the
observed data. The Metropolis-Hastings algorithm is
used to generate samples satisfying the posterior
probability density function (pdf) to quantify the
uncertainty. However it suffers from the challenge
to construct a proposal distribution that
simultaneously provides a good representation of the
true posterior pdf and is easy to manipulate. To
address this challenge, we propose a stochastic
quasi-Newton McMC method, which relies on the fact
that the Hessian of the deterministic problem is
equivalent to the inverse of the covariance matrix
of the posterior pdf. The l-BFGS (limited-memory
Broyden–Fletcher–Goldfarb–Shanno) Hessian is used to
approximate the inverse of the covariance matrix
efficiently, and the randomized source sub-sampling
strategy is used to reduce the computational cost of
evaluating the posterior pdf and constructing the
l-BFGS Hessian. Numerical experiments show the
capability of this stochastic quasi-Newton McMC
method to quantify the uncertainty of FWI with a
considerably low cost.},
keywords = {FWI, uncertainty quantification, quasi-Newton, McMC},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/zfang2014SEGsqn/zfang2014SEGsqn.html}
}
@TECHREPORT{wang2014SEGfwi,
author = {Rongrong Wang and Ozgur Yilmaz and Felix J. Herrmann},
title = {Full waveform inversion with interferometric measurements},
year = {2014},
month = {04},
number = {TR-EOAS-2014-5},
institution = {UBC},
abstract = {In this note, we design new misfit functions for
full-waveform inversion by using interferometric
measurements to reduce sensitivity to phase
errors. Though established within a completely
different setting from the linear case, we obtain a
similar observation: the interferometry can improve
robustness under certain modeling errors. Moreover,
in order to deal with errors on both source and
receiver sides, we propose a higher order
interferometry, which, as a generalization of the
usual definition, involves the cross correlation of
four traces. A proof-of-principle simulation is
included on a stylized example.},
keywords = {FWI},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/wang2014SEGfwi/wang2014SEGfwi.html}
}
@TECHREPORT{kumar2014SEGmcu,
author = {Rajiv Kumar and Oscar Lopez and Ernie Esser and Felix J. Herrmann},
title = {Matrix completion on unstructured grids: 2-D seismic data regularization and interpolation},
year = {2014},
month = {04},
number = {TR-EOAS-2014-3},
institution = {UBC},
abstract = {Seismic data interpolation via rank-minimization
techniques has been recently introduced in the
seismic community. All the existing
rank-minimization techniques assume the recording
locations to be on a regular grid, e.g. sampled
periodically, but seismic data are typically
irregularly sampled along spatial axes. Other than
the irregularity of the sampled grid, we often have
missing data. In this paper, we study the effect of
grid irregularity to conduct matrix completion on a
regular grid for unstructured data. We propose an
improvement of existing rank-minimization techniques
to do regularization. We also demonstrate that we
can perform seismic data regularization and
interpolation simultaneously. We illustrate the
advantages of the modification using a real seismic
line from the Gulf of Suez to obtain high quality
results for regularization and interpolation, a key
application in exploration geophysics.},
keywords = {regularization, interpolation, matrix completion, NFFT},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/kumar2014SEGmcu/kumar2014SEGmcu.html}
}
@TECHREPORT{smithyman2014SEGjfw,
author = {Brendan R. Smithyman and Bas Peters and Bryan DeVault and Felix J. Herrmann},
title = {Joint full-waveform inversion of on-land surface and {VSP} data from the {Permian} {Basin}},
year = {2014},
month = {04},
number = {TR-EOAS-2014-4},
institution = {UBC},
abstract = {Full-waveform Inversion is applied to generate a
high-resolution model of P-wave velocity for a site
in the Permian Basin, Texas, USA. This investigation
jointly inverts seismic waveforms from a 3-D
vibroseis surface seismic survey and a co-located
3-D Vertical Seismic Profiling (VSP) survey, which
shared common source Vibration Points (VPs). The
resulting velocity model captures features that were
not resolvable by conventional migration velocity
analysis.},
keywords = {full-waveform inversion, seismic, land, vibroseis, downhole receivers},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2014/smithyman2014SEGjfw/smithyman2014SEGjfw.html}
}
@TECHREPORT{slim2014NSERCpr,
author = {Felix J. Herrmann},
title = {{NSERC} 2014 {DNOISE} progress report},
year = {2014},
number = {TR-EOAS-2014-8},
institution = {UBC},
abstract = {As we entered the second half of the DNOISE II project,
we are happy to report that we have made significant
progress on several fronts. Firstly, our work on
seismic data acquisition with compressive sensing is
becoming widely recognized. For instance,
ConocoPhilips ran a highly successful field trial on
Marine acquisition with compressive sensing and
obtained significant improvements compared to
standard production (see figure below). Moreover,
one of the main outcomes of this year’s EAGE
workshop was that industry is ready to adopt
randomized sampling as a new acquisition
paradigm. Needless to say this is a big success for
what we have been trying to accomplish with DNOISE
II. Finally, we have made a breakthrough in the
application of randomized sampling in 4-D seismic,
which is receiving a lot of interest from
industry. Secondly, our work on large-scale
optimization in the context of wave-equation based
inversion is also increasingly widely adopted. For
instance, our batching techniques are making the
difference between making a loss or profit for a
large contractor company active in the area of
full-waveform inversion. We also continued to make
progress in exciting new directions that go beyond
sparsity promotion and which allow us to exploit
other types of structure within the data, such as
low-rank for matrices or hierarchical Tucker formats
for tensors. Application of these techniques show
excellent results and in certain cases, such as
source separation problems with small dithering,
show significant improvements over transform-domain
methods. Thirdly, we continued to make significant
progress in wave-equation based inversion. We
extended our new penalty-based formulation now
called Wavefield Reconstruction Inversion/Imaging to
include total-variation regularization and density
variations. We also continued to make progress on
multiples, imaging with multiples and 3-D
full-waveform inversion. Statoil is the latest
company to join and we have several other companies
that have shown a keen interest. We also received
substantial in-kind contributions including a
license to WesternGeco’s iOmega and HPC equipment
discounts. After many years of support BP decided
unfortunately to no longer support SINBAD quoting
financial headwind related to the Deepwater Horizon
disaster. On a more positive note, we are extremely
happy to report major progress on our efforts to
secure access to high-performance compute, including
renewed funding from NSERC and our involvement in
the International Inversion Initiative in Brazil. 9
peer-reviewed journal publications have resulted
from our work within the reporting period, with a
further 6 submitted, and DNOISE members disseminated
the results of our research at 49 major national and
international conference presentations. On the HQP
training side, 4 MSc students have recently
graduated, with one obtaining a position with CGG
Calgary, and we added 4 postdocs and 3 PhD students
to our team in September 2014, greatly increasing
our research capacity. As can be seen from the
report below, we are well on schedule and on certain
topics well beyond the milestones included in the
original proposal. With the purchase of the new
cluster we expect to see a surge of activity in
extending our algorithms to 3D. With this increased
capacity, we continue to be in an excellent position
to make fundamental contributions to the fields of
seismic data acquisition, processing, and
wave-equation based inversion. In the sections
below, we give a detailed overview of the research
and publication activities of the different members
of the group and how these relate to the objectives
of the grant, to industrial uptake, and to
outreach. Unless stated otherwise the students and
PDFs are (co)-supervised by the PI. We refer to the
publications section 4.0 for a complete list of our
presentations, conference proceedings, and journal
publications. We also refer to our mindmap, which
clearly establishes connections between the
different research topics we have embarked upon as
part of the DNOISE II project.},
keywords = {NSERC, DNOISE},
url = {https://slim.gatech.edu/Publications/Public/TechReport/NSERC/2014/Progress_Report_2014.html}
}
%-----2013-----%
@TECHREPORT{li2013EAGEwebmplijsp,
author = {Xiang Li and Felix J. Herrmann},
title = {Wave-equation based multi-parameter linearized inversion with joint-sparsity promotion},
year = {2013},
number = {TR-EOAS-2013-1},
institution = {UBC},
abstract = {The successful application of linearized inversion is
affected by the prohibitive size of the data,
computational resources required, and how accurately
the model parameters reflects the real Earth
properties. The issue of data size and computational
resources can be addressed by combining ideas from
sparsity promoting and stochastic optimization,
which can allow us to invert model perturbation with
a small subset of the data, yielding a few PDE
solves for the inversion. In this abstract, we are
aiming at addressing the issue of accuracy of model
parameters by inverting density and velocity
simultaneously rather than only using velocity. As a
matter of fact, the effects of density and velocity
variations towards the wavefield are very similar,
which will cause energy leakage between density and
velocity images. To overcome this issue, we proposed
an incoherence enhanced method that can reduce the
similarity between the effect of density and
velocity. Moreover, the location of structural
variations in velocity and density are often
overlapped in geological setting, thus in this
abstract, we also exploit this property with
joint-sparsity promoting to further improve the
imaging result.},
keywords = {linearized inversion, incoherence enhancement, joint-sparsity},
month = {01},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2013/li2013EAGEwebmplijsp/li2013EAGEwebmplijsp.pdf}
}
@TECHREPORT{kumar2013ICMLlr,
author = {Aleksandr Y. Aravkin and Rajiv Kumar and Hassan Mansour and Ben Recht and Felix J. Herrmann},
title = {An {SVD}-free {Pareto} curve approach to rank minimization},
year = {2013},
number = {TR-EOAS-2013-2},
institution = {UBC},
abstract = {Recent SVD-free matrix factorization formulations have
enabled rank optimization for extremely large-scale
systems (millions of rows and columns). In this
paper, we consider rank-regularized formulations
that only require a target data-fitting error level,
and propose an algorithm for the corresponding
problem. We illustrate the advantages of the new
approach using the Netflix problem, and use it to
obtain high quality results for seismic trace
interpolation, a key application in exploration
geophysics. We show that factor rank can be easily
adjusted as the inversion proceeds, and propose a
weighted extension that allows known subspace
information to improve the results of matrix
completion formulations. Using these methods, we
obtain high-quality reconstructions for large scale
seismic interpolation problems with real data.},
keywords = {interpolation, low rank},
month = {02},
url = {https://slim.gatech.edu/Publications/Public/TechReport/2013/kumar2013ICMLlr/kumar2013ICMLlr.pdf}
}
@TECHREPORT{oghenekohwo2013SEGtlswrs,
author = {Felix Oghenekohwo and Felix J. Herrmann},
title = {Time-lapse seismics with randomized sampling},