% This file was created with JabRef 2.8.1.
% Encoding: MacRoman
%----- 2024 -----%
@ARTICLE{yin2024wiser,
author = {Ziyi Yin and Rafael Orozco and Felix J. Herrmann},
title = {WISER: multimodal variational inference for full-waveform inversion without dimensionality reduction},
year = {2024},
month = {11},
abstract = {We present a semi-amortized variational inference framework designed for computationally feasible uncertainty quantification in 2D full-waveform inversion to explore the multimodal posterior distribution without dimensionality reduction. The framework is called WISER, short for full-Waveform variational Inference via Subsurface Extensions with Refinements. WISER leverages the power of generative artificial intelligence to perform approximate amortized inference that is low-cost, albeit subject to an amortization gap. This gap is closed through non-amortized refinements that make frugal use of acoustic wave physics. Case studies illustrate that WISER delivers full-resolution, computationally feasible, and reliable uncertainty estimates of velocity models and imaged reflectivities.},
keywords = {WISER, WISE, FWI, imaging, CIG, conditional normalizing flows, Bayesian inference, amortized variational inference, uncertainty quantification, deep learning, inverse problems, summary statistics, MVA},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2024/yin2024wiser/WISER.html},
note = {(Accepted for publication in Geophysics)},
doi = {10.48550/arXiv.2405.10327}
}
@ARTICLE{orozco2023invnet,
author = {Rafael Orozco and Philipp A. Witte and Mathias Louboutin and Ali Siahkoohi and Gabrio Rizzuti and Bas Peters and Felix J. Herrmann},
title = {InvertibleNetworks.jl: A Julia package for scalable normalizing flows},
journal = {Journal of Open Source Software},
year = {2024},
month = {7},
volume = {9},
abstract = {InvertibleNetworks.jl is a Julia package designed for the scalable implementation of normalizing flows, a method for density estimation and sampling in high-dimensional distributions. This package excels in memory efficiency by leveraging the inherent invertibility of normalizing flows, which significantly reduces memory requirements during backpropagation compared to existing normalizing flow packages that rely on automatic differentiation frameworks. InvertibleNetworks.jl has been adapted for diverse applications, including seismic imaging, medical imaging, and CO2 monitoring, demonstrating its effectiveness in learning high-dimensional distributions.},
keywords = {normalizing flows, software, conditional normalizing flows, Bayesian inference, uncertainty quantification, deep learning, inverse problems, memory, HPC, computing},
url = {https://doi.org/10.21105/joss.06554},
doi = {10.21105/joss.06554}
}
@ARTICLE{yin2024tfp,
author = {Ziyi Yin and Mathias Louboutin and Olav Møyner and Felix J. Herrmann},
title = {Time-lapse full-waveform permeability inversion: a feasibility study},
journal = {The Leading Edge},
year = {2024},
month = {8},
volume = {43},
number = {8},
abstract = {Time-lapse seismic monitoring necessitates integrated workflows that combine seismic and reservoir modeling to enhance reservoir property estimation. We present a feasibility study of an end-to-end inversion framework that directly inverts for permeability from prestack time-lapse seismic data. To assess the method’s robustness, we design experiments focusing on its sensitivity to initial models and potential errors in modeling. Our study leverages the Compass model to simulate CO2 storage in saline aquifers, which is derived from well and seismic data from the North Sea, a candidate site for geological carbon storage.},
keywords = {gcs, 4D, time-lapse, ccs, coupled inversion, end-to-end, fluid-flow, inversion, monitoring},
url = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2024/yin2024tfp/paper.html},
note = {(The Leading Edge)},
doi = {10.1190/tle43080544.1}
}
@ARTICLE{yin2023wise,
author = {Ziyi Yin and Rafael Orozco and Mathias Louboutin and Felix J. Herrmann},
title = {WISE: full-Waveform variational Inference via Subsurface Extensions},
journal = {Geophysics},
year = {2024},
month = {04},
abstract = {We introduce a probabilistic technique for full-waveform inversion, employing variational inference and conditional normalizing flows to quantify uncertainty in migration-velocity models and its impact on imaging. Our approach integrates generative artificial intelligence with physics-informed common-image gathers, reducing reliance on accurate initial velocity models. Case studies demonstrate its efficacy in producing realizations of migration-velocity models conditioned by the data. These models are used to quantify amplitude and positioning effects during subsequent imaging.},
keywords = {Geophysics, WISE, FWI, RTM, imaging, CIG, conditional normalizing flows, Bayesian inference, amortized variational inference, uncertainty quantification, deep learning, inverse problems, summary statistics, MVA},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2024/yin2023wise/paper.html},
note = {(Geophysics)},
doi = {10.1190/geo2023-0744.1}
}
%----- 2023 -----%
@ARTICLE{herrmann2023dte,
author = {Felix J. Herrmann},
title = {Digital twins in the era of generative AI},
journal = {The Leading Edge},
year = {2023},
month = {11},
volume = {42},
number = {11},
keywords = {SLIM, Invertible Networks, ccs, gcs, monitoring, imaging, multiphysics, machine learning, deep learning, time-lapse, inversion, normalizing flows, software},
note = {(The Leading Edge, President's Page)},
doi = {10.1190/tle42110730.1},
url = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2023/herrmann2023dte/PresidentsPage.pdf},
}
@ARTICLE{yin2023smi,
author = {Ziyi Yin and Rafael Orozco and Mathias Louboutin and Felix J. Herrmann},
title = {Solving multiphysics-based inverse problems with learned surrogates and constraints},
journal = {Advanced Modeling and Simulation in Engineering Sciences},
year = {2023},
month = {10},
volume = {10},
abstract = {Solving multiphysics-based inverse problems for geological carbon storage monitoring can be challenging when multimodal time-lapse data are expensive to collect and costly to simulate numerically. We overcome these challenges by combining computationally cheap learned surrogates with learned constraints. Not only does this combination lead to vastly improved inversions for the important fluid-flow property, permeability, it also provides a natural platform for inverting multimodal data including well measurements and active-source time-lapse seismic data. By adding a learned constraint, we arrive at a computationally feasible inversion approach that remains accurate. This is accomplished by including a trained deep neural network, known as a normalizing flow, which forces the model iterates to remain in-distribution, thereby safeguarding the accuracy of trained Fourier neural operators that act as surrogates for the computationally expensive multiphase flow simulations involving partial differential equation solves. By means of carefully selected experiments, centered around the problem of geological carbon storage, we demonstrate the efficacy of the proposed constrained optimization method on two different data modalities, namely time-lapse well and time-lapse seismic data. While permeability inversions from both modalities have their pluses and minuses, their joint inversion benefits from both, yielding superior permeability inversions and CO2 plume predictions near, and far away from, the monitoring wells.},
keywords = {AMSES, Fourier neural operators, normalizing flows, multiphysics, deep learning, learned surrogates, learned constraints, inverse problems},
url = {https://doi.org/10.1186/s40323-023-00252-0},
url2 = {https://slim.gatech.edu/Publications/Public/Journals/AMSES/2023/yin2023smi/paper.html},
doi = {10.1186/s40323-023-00252-0},
note = {(Advanced Modeling and Simulation in Engineering Sciences)}
}
@ARTICLE{louboutin2023rte,
author = {Mathias Louboutin and Felix J. Herrmann},
title = {Wave-based inversion at scale on GPUs with randomized trace estimation},
journal = {Geophysical Prospecting},
year = {2023},
month = {07},
abstract = {Thanks to continued performance improvements in software and hardware, wave-equation-based imaging technologies, such as full-waveform inversion and reverse-time migration, are becoming more commonplace. However, widespread adoption of these advanced imaging modalities has not yet materialized because current implementations are not able to reap the full benefits from accelerators, in particular those offered by memory-scarce graphics processing units. Through the use of randomized trace estimation, we overcome the memory bottleneck of this type of hardware. At the cost of limited computational overhead and controllable incoherent errors in the gradient, the memory footprint of adjoint-state methods is reduced drastically. Thanks to this relatively simple-to-implement memory reduction via an approximate imaging condition, we are able to benefit from graphics processing units without memory offloading. We demonstrate the performance of the proposed algorithm on acoustic 2- and 3-D full-waveform inversion examples and on the formation of image gathers in tilted transverse isotropic media.},
keywords = {FWI, Stochastic, Random trace, CIG, GPUs, 3D},
url = {https://slim.gatech.edu/Publications/Public/Journals/GeophysicalProspecting/2023/louboutin2023rte/paper.html},
doi = {10.1111/1365-2478.13405},
note = {(Geophysical Prospecting)}
}
@ARTICLE{grady2022SCtll,
author = {Thomas J. Grady II and Rishi Khan and Mathias Louboutin and Ziyi Yin and Philipp A. Witte and Ranveer Chandra and Russell J. Hewett and Felix J. Herrmann},
title = {Model-Parallel Fourier Neural Operators as Learned Surrogates for Large-Scale Parametric PDEs},
journal = {Computers \& Geosciences},
year = {2023},
month = {07},
volume = {178},
pages = {105402},
abstract = {Fourier neural operators (FNOs) are a recently introduced neural network architecture for learning solution operators of partial differential equations (PDEs), which have been shown to perform significantly better than comparable deep learning approaches. Once trained, FNOs can achieve speed-ups of multiple orders of magnitude over conventional numerical PDE solvers. However, due to the high dimensionality of their input data and network weights, FNOs have so far only been applied to two-dimensional or small three-dimensional problems. To remove this limited problem-size barrier, we propose a model-parallel version of FNOs based on domain-decomposition of both the input data and network weights. We demonstrate that our model-parallel FNO is able to predict time-varying PDE solutions of over 2.6 billion variables on Perlmutter using up to 512 A100 GPUs and show an example of training a distributed FNO on the Azure cloud for simulating multiphase CO2 dynamics in the Earth's subsurface.},
keywords = {Computers and Geosciences, Fourier neural operators, HPC, large-scale, CCS, deep learning, Operator Learning, Model Parallelism, Multiphase Flow},
software = {https://github.com/slimgroup/dfno},
url = {https://slim.gatech.edu/Publications/Public/Journals/ComputersAndGeosciences/2023/grady2022SCtll/grady2022SCtll.pdf},
doi = {10.1016/j.cageo.2023.105402},
note = {(Computers and Geosciences)}
}
@ARTICLE{louboutin2023lmi,
author = {Mathias Louboutin and Ziyi Yin and Rafael Orozco and Thomas J. Grady II and Ali Siahkoohi and Gabrio Rizzuti and Philipp A. Witte and Olav Møyner and Gerard J. Gorman and Felix J. Herrmann},
title = {Learned multiphysics inversion with differentiable programming and machine learning},
journal = {The Leading Edge},
year = {2023},
month = {07},
volume = {42},
number = {7},
pages = {452-516},
abstract = {We present the Seismic Laboratory for Imaging and Modeling/Monitoring (SLIM) open-source software framework for computational geophysics and, more generally, inverse problems involving the wave equation (e.g., seismic and medical ultrasound), regularization with learned priors, and learned neural surrogates for multiphase flow simulations. By integrating multiple layers of abstraction, our software is designed to be both readable and scalable. This allows researchers to easily formulate their problems in an abstract fashion while exploiting the latest developments in high-performance computing. We illustrate and demonstrate our design principles and their benefits by means of building a scalable prototype for permeability inversion from time-lapse crosswell seismic data, which, aside from coupling wave physics and multiphase flow, involves machine learning.},
keywords = {SLIM, Invertible Networks, ccs, gcs, monitoring, imaging, Fourier neural operators, multiphysics, machine learning, deep learning, time-lapse, inversion, normalizing flows, software},
note = {(The Leading Edge)},
url = {https://library.seg.org/doi/10.1190/tle42070474.1},
url2 = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2023/louboutin2023lmi/le_software.html},
presentation = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2023/louboutin2023lmi/le_software.pdf}
}
@ARTICLE{zhang2023otl,
author = {Yijun Zhang and Ziyi Yin and Oscar Lopez and Ali Siahkoohi and Mathias Louboutin and Rajiv Kumar and Felix J. Herrmann},
title = {Optimized time-lapse acquisition design via spectral gap ratio minimization},
journal = {Geophysics},
year = {2023},
month = {07},
volume = {88},
number = {4},
pages = {A19-A23},
abstract = {Modern-day reservoir management and monitoring of geological carbon storage increasingly call for costly time-lapse seismic data collection. In this letter, we show how techniques from graph theory can be used to optimize acquisition geometries for low-cost sparse 4D seismic. Based on midpoint-offset domain connectivity arguments, the proposed algorithm automatically produces sparse non-replicated time-lapse acquisition geometries that favor wavefield recovery.},
keywords = {time-lapse, JRM, acquisition, survey design, wavefield reconstruction, spectral gap, matrix factorization},
doi = {10.1190/geo2023-0024.1},
note = {(Geophysics)},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2023/zhang2023otl/Spectral_Gap_Paper.html}
}
@ARTICLE{siahkoohi2022ravi,
author = {Ali Siahkoohi and Gabrio Rizzuti and Rafael Orozco and Felix J. Herrmann},
title = {Reliable amortized variational inference with physics-based latent distribution correction},
journal = {Geophysics},
year = {2023},
month = {05},
volume = {88},
number = {3},
abstract = {Bayesian inference for high-dimensional inverse problems is
challenged by the computational costs of the forward operator and the
selection of an appropriate prior distribution. Amortized variational
inference addresses these challenges where a neural network is trained to
approximate the posterior distribution over existing pairs of model and data.
When fed previously unseen data and normally distributed latent samples as
input, the pretrained deep neural network---in our case a conditional
normalizing flow---provides posterior samples with virtually no cost.
However, the accuracy of this approach relies on the availability of
high-fidelity training data, which seldom exists in geophysical inverse
problems because of the highly heterogeneous structure of the Earth. In
addition, accurate amortized variational inference requires the observed data
to be drawn from the training data distribution. As such, we offer a solution
that increases the resilience of amortized variational inference when faced
with data distribution shifts, e.g., changes in the forward model or prior
distribution. Our method involves a physics-based correction to the
conditional normalizing flow latent distribution to provide a more accurate
approximation to the posterior distribution for the observed data at hand. To
accomplish this, instead of a standard Gaussian latent distribution, we
parameterize the latent distribution by a Gaussian distribution with an
unknown mean and diagonal covariance. These unknown quantities are then
estimated by minimizing the Kullback-Leibler divergence between the corrected
and true posterior distributions. While generic and applicable to other
inverse problems, by means of a seismic imaging example, we show that our
correction step improves the robustness of amortized variational inference
with respect to changes in number of source experiments, noise variance, and
shifts in the prior distribution. This approach provides a seismic image with
limited artifacts and an assessment of its uncertainty with approximately the
same cost as five reverse-time migrations.},
keywords = {Variational Inference, Seismic Imaging, Normalizing Flows, Inverse Problems, Uncertainty Quantification},
doi = {10.1190/geo2022-0472.1},
note = {(Geophysics)},
software = {https://github.com/slimgroup/ReliableAVI.jl},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2023/siahkoohi2022ravi/paper.html}
}
@ARTICLE{lopez2020gsb,
author = {Oscar Lopez and Rajiv Kumar and Nick Moldoveanu and Felix J. Herrmann},
title = {Spectral Gap-Based Seismic Survey Design},
journal = {IEEE Transactions on Geoscience and Remote Sensing},
year = {2023},
month = {1},
abstract = {Seismic imaging in challenging sedimentary basins and reservoirs requires acquiring, processing, and imaging very large volumes of data (tens of terabytes). To reduce the cost of acquisition and the time from acquiring the data to producing a subsurface image, novel acquisition systems based on compressive sensing, low-rank matrix recovery, and randomized sampling have been developed and implemented. These approaches allow practitioners to achieve dense wavefield reconstruction from a substantially reduced number of field samples. However, designing acquisition surveys suited for this new sampling paradigm remains a critical and challenging role in oil, gas, and geothermal exploration. Typical random designs studied in the low-rank matrix recovery and compressive sensing literature are difficult to achieve by standard industry hardware. For practical purposes, a compromise between stochastic and realizable samples is needed. In this paper, we propose a deterministic and computationally cheap tool to alleviate randomized acquisition design, prior to survey deployment and large-scale optimization. We consider universal and deterministic matrix completion results in the context of seismology, where a bipartite graph representation of the source-receiver layout allows for the respective spectral gap to act as a quality metric for wavefield reconstruction. We provide realistic scenarios to demonstrate the utility of the spectral gap as a flexible tool that can be incorporated into existing survey design workflows for successful seismic data acquisition via low-rank and sparse signal recovery.},
keywords = {IEEE, Spectral gap, low rank matrix completion, compressive sensing, seismic data, seismic trace interpolation, bipartite graph, biadjacency matrix, nuclear norm minimization},
doi = {10.1109/TGRS.2023.3237464},
url = {https://slim.gatech.edu/Publications/Public/Journals/IEEETGRS/2023/lopez2020gsb/lopez2020gsb.pdf}
}
@ARTICLE{yin2022TLEdgc,
author = {Ziyi Yin and Huseyin Tuna Erdinc and Abhinav Prakash Gahlot and Mathias Louboutin and Felix J. Herrmann},
title = {Derisking geologic carbon storage from high-resolution time-lapse seismic to explainable leakage detection},
journal = {The Leading Edge},
year = {2023},
month = {01},
volume = {42},
number = {1},
pages = {69-76},
abstract = {Geological carbon storage represents one of the few truly scalable technologies capable of reducing the {CO$_2$} concentration in the atmosphere. While this technology has the potential to scale, its success hinges on our ability to mitigate its risks. An important aspect of risk mitigation concerns assurances that the injected {CO$_2$} remains within the storage complex. Amongst the different monitoring modalities, seismic imaging stands out with its ability to attain high-resolution and high-fidelity images. However, these superior features come, unfortunately, at prohibitive costs and time-intensive efforts, potentially rendering extensive seismic monitoring undesirable. To overcome this shortcoming, we present a methodology where time-lapse images are created by inverting non-replicated time-lapse monitoring data jointly. By no longer insisting on replication of the surveys to obtain high-fidelity time-lapse images and differences, extreme costs and time-consuming labor are averted. To demonstrate our approach, hundreds of noisy time-lapse seismic datasets are simulated that contain imprints of regular {CO$_2$} plumes and irregular plumes that leak. These time-lapse datasets are subsequently inverted to produce time-lapse difference images used to train a deep neural classifier. The testing results show that the classifier is capable of detecting {CO$_2$} leakage automatically on unseen data and with reasonable accuracy.},
keywords = {Seismic Imaging, JRM, CCS, classification, CAM, explainability, time-lapse, resolution},
doi = {10.1190/tle42010069.1},
note = {(The Leading Edge)},
software = {https://github.com/slimgroup/GCS-CAM},
url = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2022/yin2022TLEdgc/paper.html}
}
%----- 2022 -----%
@ARTICLE{kukreja2020lcc,
author = {Navjot Kukreja and Jan Hueckelheim and Mathias Louboutin and John Washbourne and Paul H. J. Kelly and Gerard J. Gorman},
title = {Lossy Checkpoint Compression in Full Waveform Inversion},
journal = {Geoscientific Model Development},
year = {2022},
month = {5},
volume = {15},
number = {9},
pages = {3815-3829},
abstract = {This paper proposes a new method that combines checkpointing
methods with error-controlled lossy compression for large-scale
high-performance Full-Waveform Inversion (FWI), an inverse problem commonly
used in geophysical exploration. This combination can significantly reduce
data movement, allowing a reduction in run time as well as peak memory. In
the Exascale computing era, frequent data transfer (e.g., memory bandwidth,
PCIe bandwidth for GPUs, or network) is the performance bottleneck rather
than the peak FLOPS of the processing unit. Like many other adjoint-based
optimization problems, FWI is costly in terms of the number of floating-point
operations, large memory footprint during backpropagation, and data transfer
overheads. Past work for adjoint methods has developed checkpointing methods
that reduce the peak memory requirements during backpropagation at the cost
of additional floating-point computations. Combining this traditional
checkpointing with error-controlled lossy compression, we explore the
three-way tradeoff between memory, precision, and time to solution. We
investigate how approximation errors introduced by lossy compression of the
forward solution impact the objective function gradient and final inverted
solution. Empirical results from these numerical experiments indicate that
high lossy-compression rates (compression factors ranging up to 100) have a
relatively minor impact on convergence rates and the quality of the final
solution.},
keywords = {GMD, Lossy compression, Full waveform inversion, checkpointing, memory},
note = {(Geoscientific Model Development)},
doi = {10.5194/gmd-15-3815-2022},
url = {https://arxiv.org/pdf/2009.12623.pdf}
}
@ARTICLE{siahkoohi2021dbif,
author = {Ali Siahkoohi and Gabrio Rizzuti and Felix J. Herrmann},
title = {Deep Bayesian inference for seismic imaging with tasks},
journal = {Geophysics},
year = {2022},
month = {07},
volume = {87},
number = {5},
pages = {281-302},
abstract = {We propose to use techniques from Bayesian inference and deep
neural networks to translate uncertainty in seismic imaging to uncertainty in
tasks performed on the image, such as horizon tracking. Seismic imaging is an
ill-posed inverse problem because of unavoidable bandwidth and aperture
limitations, and is further hampered by the presence of noise and
linearization errors. Many regularization methods, such as transform-domain
sparsity promotion, have been designed to deal with the adverse effects of
these errors; however, these methods run the risk of biasing the solution and
do not provide information on uncertainty in the image space and how this
uncertainty impacts certain tasks on the image. A systematic approach is
proposed to translate uncertainty due to noise in the data to confidence
intervals of automatically tracked horizons in the image. The uncertainty is
characterized by a convolutional neural network (CNN) and to assess these
uncertainties, samples are drawn from the posterior distribution of the CNN
weights, used to parameterize the image. Compared to traditional priors, it
is argued in the literature that these CNNs introduce a flexible inductive
bias that is a surprisingly good fit for many diverse domains in imaging. The
method of stochastic gradient Langevin dynamics is employed to sample from
the posterior distribution. This method is designed to handle large scale
Bayesian inference problems with computationally expensive forward operators
as in seismic imaging. Aside from offering a robust alternative to the
maximum a posteriori estimate, which is prone to overfitting, access to these
samples allows us to translate uncertainty in the image, due to noise in the data, to
uncertainty on the tracked horizons. For instance, it admits estimates for
the pointwise standard deviation on the image and for confidence intervals on
its automatically tracked horizons.},
keywords = {deep priors, seismic imaging, uncertainty quantification, horizon tracking},
doi = {10.1190/geo2021-0666.1},
note = {(Geophysics)},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2022/siahkoohi2021dbif/paper.html}
}
%----- 2021 -----%
@ARTICLE{sanavi2021tad,
author = {Hamideh Sanavi and Peyman P. Moghaddam and Felix J. Herrmann},
title = {True amplitude depth migration using curvelets},
journal = {Geophysics},
year = {2021},
month = {07},
volume = {86},
number = {4},
pages = {S299-S310},
abstract = {We have developed a true-amplitude solution to the seismic imaging problem. We derive a diagonal scaling approach for the normal operator approximation in the curvelet domain. This is based on the theorem that states that curvelets remain approximately invariant under the action of the normal operator. We use curvelets as essential tools for approximation and inversion. We also exploit the theorem that states that the curvelet-domain approximation should be smooth in phase space by enforcing the smoothness of curvelet coefficients in the angle and space domains. We analyze our method using a reverse time migration-demigration code, simulating the acoustic wave equation on different synthetic models. Our method produces good resolution of reflecting dips, reproduces true-amplitude reflectors, and compensates for incomplete illumination in seismic images.},
keywords = {imaging, depth migration, inversion, amplitude, lsrtm},
note = {(Geophysics)},
doi = {10.1190/geo2019-0307.1}
}
@ARTICLE{rizzuti2020dfw,
author = {Gabrio Rizzuti and Mathias Louboutin and Rongrong Wang and Felix J. Herrmann},
title = {A dual formulation of wavefield reconstruction inversion for
large-scale seismic inversion},
journal = {Geophysics},
year = {2021},
month = {10},
volume = {86},
number = {6},
pages = {1ND-Z3},
abstract = {Many of the seismic inversion techniques currently proposed that focus on robustness with respect to the background model choice are not apt to large-scale 3D applications, and the methods that are computationally feasible for industrial problems, such as full waveform inversion, are notoriously limited by convergence stagnation and require adequate starting models. We propose a novel solution that is both scalable and less sensitive to starting models or inaccurate parameters (such as anisotropy) that are typically kept fixed during inversion. It is based on a dual reformulation of the classical wavefield reconstruction inversion, whose empirical robustness with respect to these issues is well documented in the literature. While the classical version is not suited to 3D, as it leverages expensive frequency-domain solvers for the wave equation, our proposal allows the deployment of state-of-the-art time-domain finite-difference methods, and is potentially mature for industrial-scale problems.},
keywords = {3D, Full-waveform inversion, Wave equation, Finite-difference},
note = {(Geophysics)},
doi = {10.1190/geo2020-0743.1},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2021/rizzuti2020dfw/rizzuti2020dfw.html}
}
@ARTICLE{yang2020lrpo,
author = {Mengmeng Yang and Marie Graff and Rajiv Kumar and Felix J. Herrmann},
title = {Low-rank representation of omnidirectional subsurface extended image
volumes},
journal = {Geophysics},
year = {2021},
month = {01},
volume = {86},
number = {3},
pages = {1MJ-WA152},
abstract = {Subsurface-offset gathers play an increasingly important role in seismic
imaging. These gathers are used during velocity model building and inversion
of rock properties from amplitude variations. While powerful, these gathers
come with high computational and storage demands to form and manipulate these
high dimensional objects. This explains why only limited numbers of image
gathers are computed over a limited offset range. We avoid these high costs
by working with highly compressed low-rank factorizations. We arrive at these
factorizations via a combination of probings with the double two-way wave
equation and randomized singular value decompositions. In turn, the resulting
factorizations give us access to all subsurface offsets without having to
form the full extended image volumes. The latter is computationally
prohibitive because extended image volumes are quadratic in image size. As a
result, we can easily handle situations where conventional horizontal offset
gathers are no longer focused. More importantly, the factorization also
provides a mechanism to use the invariance relation of extended image volumes
for velocity continuation. With this technique, extended image volumes for
one background velocity model can directly be mapped to those of another
background velocity model. Our low-rank factorization inherits this
invariance property so we incur factorization costs only once when examining
different imaging scenarios. Because all imaging experiments only involve the
factors, they are computationally cheap with costs that scale with the rank
of the factorization. We validate our methodology on 2D synthetics including
a challenging imaging example with salt. Our experiments show that our
low-rank factorization parameterizes extended image volumes naturally.
Instead of brute force explicit cross-correlations between shifted source and receiver
wavefields, our approach relies on the underlying linear-algebra structure
that enables us to work with these objects without incurring unfeasible
demands on computation and storage.},
keywords = {extended image volumes, low rank, randomized linear algebra, power
schemes, invariance relationship},
note = {(Geophysics)},
doi = {10.1190/geo2020-0152.1},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2021/yang2020lrpo/Paper_final.html}
}
%----- 2020 -----%
@ARTICLE{yang2020tdsp,
author = {Mengmeng Yang and Zhilong Fang and Philipp A. Witte and Felix J. Herrmann},
title = {Time-domain sparsity promoting least-squares reverse time migration
with source estimation},
journal = {Geophysical Prospecting},
year = {2020},
month = {08},
volume = {68},
number = {9},
pages = {2697-2711},
abstract = {Least-squares reverse time migration is well-known for its capability to generate artifact-free true-amplitude subsurface images through fitting observed data in the least-squares sense. However, when applied to realistic imaging problems, this approach is faced with issues related to overfitting and excessive computational costs induced by many wave-equation solves. The fact that the source function is unknown complicates this situation even further. Motivated by recent results in stochastic optimization and transform-domain sparsity-promotion, we demonstrate that the computational costs of inversion can be reduced significantly while avoiding imaging artifacts and restoring amplitudes. While powerful, these new approaches do require accurate information on the source-time function, which is often lacking. Without this information, the imaging quality deteriorates rapidly. We address this issue by presenting an approach where the source-time function is estimated on the fly through a technique known as variable projection. Aside from introducing negligible computational overhead, the proposed method is shown to perform well on imaging problems with noisy data and problems that involve complex settings such as salt. In either case, the presented method produces high-resolution, high-amplitude-fidelity images, including an estimate of the source-time function. In addition, due to its use of stochastic optimization, we arrive at these images at roughly one to two times the cost of conventional reverse time migration involving all data.},
keywords = {sparsity inversion, source estimation, penalty},
note = {(Geophysical Prospecting)},
doi = {10.1111/1365-2478.13021},
url = {https://slim.gatech.edu/Publications/Public/Journals/GeophysicalProspecting/2020/yang2020tdsp/yang2020tdsp.html}
}
@ARTICLE{daskalakis2019SIAMJISasr,
author = {Emmanouil Daskalakis and Felix J. Herrmann and Rachel Kuske},
title = {Accelerating Sparse Recovery by Reducing Chatter},
journal = {SIAM Journal on Imaging Sciences},
year = {2020},
month = {07},
volume = {13},
number = {3},
pages = {1211–1239},
abstract = {Compressive Sensing has driven a resurgence of sparse recovery algorithms with
$\ell_1$-norm minimization. While these minimizations are relatively well understood for small underdetermined, possibly inconsistent systems, their behavior for large over-determined and inconsistent systems has received much less attention. Specifically, we focus on large systems where computational restrictions call for algorithms that use randomized subsets of rows that are touched a limited number of times. In that regime, $\ell_1$-norm minimization algorithms exhibit unwanted fluctuations near the desired solution, and the linearized Bregman iterations are no exception. We explain this observed lack of performance in terms of chatter, a well-known phenomenon observed in non-smooth dynamical systems, where intermediate solutions wander between different states, stifling convergence. By identifying chatter as the culprit, we modify the Bregman iterations with chatter-reducing adaptive element-wise step lengths in combination with potential support detection via threshold crossing. We demonstrate the performance of our algorithm on carefully selected stylized examples and a realistic seismic imaging problem involving millions of unknowns and matrix-free matrix-vector products that involve expensive wave-equation solves.},
keywords = {sparsity promotion, inconsistent linear systems, Kaczmarz, linearized Bregman, dynamical systems, non-smooth dynamics, chatter},
note = {(SIAM Journal on Imaging Sciences)},
doi = {10.1137/19M129111X},
url = {https://slim.gatech.edu/Publications/Public/Journals/SIAMJournalOnImagingSciences/2020/daskalakis2019SIAMJISasr/daskalakis2019SIAMJISasr.pdf}
}
@ARTICLE{luporini2018aap,
author = {Fabio Luporini and Mathias Louboutin and Michael Lange and Navjot Kukreja and Philipp A. Witte and Jan Hueckelheim and Charles Yount and Paul H. J. Kelly and Felix J. Herrmann and Gerard J. Gorman},
title = {Architecture and performance of Devito, a system for automated stencil computation},
journal = {ACM Trans. Math. Softw.},
year = {2020},
month = {04},
volume = {46},
number = {1},
abstract = {Stencil computations are a key part of many high-performance computing applications, such as image processing, convolutional neural networks, and finite-difference solvers for partial differential equations. Devito is a framework capable of generating highly-optimized code given symbolic equations expressed in Python, specialized in, but not limited to, affine (stencil) codes. The lowering process – from mathematical equations down to C++ code – is
performed by the Devito compiler through a series of intermediate representations. Several performance optimizations are introduced, including advanced common sub-expression elimination, tiling, and parallelization. Some of these are obtained through well-established stencil optimizers, integrated in the back-end of the Devito compiler. The architecture of the Devito compiler, as well as the performance optimizations that are applied when generating code, are presented. The effectiveness of such performance optimizations is demonstrated using operators drawn from seismic imaging applications.},
keywords = {Stencil, finite difference method, symbolic processing, structured grid, compiler, performance optimization},
note = {(ACM Trans. Math. Softw.)},
doi = {10.1145/3374916},
url = {https://slim.gatech.edu/Publications/Public/Journals/ACMTOMS/2020/luporini2018aap/luporini2018aap.pdf}
}
@ARTICLE{witte2019TPDedas,
author = {Philipp A. Witte and Mathias Louboutin and Henryk Modzelewski and Charles Jones and James Selvage and Felix J. Herrmann},
title = {An Event-Driven Approach to Serverless Seismic Imaging in the Cloud},
journal={IEEE Transactions on Parallel and Distributed Systems},
year = {2020},
month = {03},
volume = {31},
number = {9},
pages={2032-2049},
abstract = {Adapting the cloud for high-performance computing (HPC) is a
challenging task, as software for HPC applications hinges on fast network
connections and is sensitive to hardware failures. Using cloud infrastructure
to recreate conventional HPC clusters is therefore in many cases an
infeasible solution for migrating HPC applications to the cloud. As an
alternative to the generic lift and shift approach, we consider the specific
application of seismic imaging and demonstrate a serverless and event-driven
approach for running large-scale instances of this problem in the cloud.
Instead of permanently running compute instances, our workflow is based on a
serverless architecture with high throughput batch computing and event-driven
computations, in which computational resources are only running as long as
they are utilized. We demonstrate that this approach is very flexible and
allows for resilient and nested levels of parallelization, including domain
decomposition for solving the underlying partial differential equations.
While the event-driven approach introduces some overhead as computational
resources are repeatedly restarted, it inherently provides resilience to
instance shut-downs and allows a significant reduction of cost by avoiding
idle instances, thus making the cloud a viable alternative to on-premise
clusters for large-scale seismic imaging.},
keywords = {cloud, imaging, serverless, event-driven, lsrtm},
note = {(IEEE Transactions on Parallel and Distributed Systems)},
doi={10.1109/TPDS.2020.2982626},
url = {https://slim.gatech.edu/Publications/Public/Journals/IEEETPDS/2020/witte2019TPDedas/witte2019TPDedas.html}
}
%----- 2019 -----%
@ARTICLE{siahkoohi2019itl,
author = {Ali Siahkoohi and Mathias Louboutin and Felix J. Herrmann},
title = {The importance of transfer learning in seismic modeling and imaging},
journal = {Geophysics},
year = {2019},
abstract = {Accurate forward modeling is essential for solving inverse problems
in exploration seismology. Unfortunately, it is often not possible to afford being
physically or numerically accurate. To overcome this conundrum, we make use of raw and processed data from nearby surveys. We propose to use this data, consisting of shot records or velocity models, to pre-train a neural network to
correct for the effects of, for instance, the free surface or numerical dispersion, both of which can be considered as proxies for incomplete or inaccurate physics. Given this pre-trained neural network, we apply transfer learning to fine-tune it so it performs well on its task of mapping low-cost, but low-fidelity, solutions to high-fidelity solutions for the current survey. As long as we can limit ourselves during fine-tuning
to using only a small fraction of high-fidelity data, we gain the ability to process the current survey using information from nearby surveys. We demonstrate this principle by removing surface-related multiples and ghosts from shot records, and the effects of numerical dispersion from migrated images and wave simulations.},
keywords = {deep learning, transfer learning, modeling, imaging, SRME},
doi = {10.1190/geo2019-0056.1},
note = {(Geophysics)},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2019/siahkoohi2019itl/siahkoohi2019itl.html}
}
@ARTICLE{kukreja2019PASCccd,
author = {Navjot Kukreja and Jan Hueckelheim and Mathias Louboutin and Paul Hovland and Gerard Gorman},
title = {Combining checkpointing and data compression to accelerate adjoint-based optimization problems},
journal = {Euro-Par 2019: Parallel Processing},
year = {2019},
pages = {87-100},
publisher = {Springer International Publishing},
abstract = {Seismic inversion and imaging are adjoint-based optimization problems that process up to terabytes of data, regularly exceeding the memory capacity of available computers. Data compression is an effective strategy to reduce this memory requirement by a certain factor, particularly if some loss in accuracy is acceptable. A popular alternative is checkpointing, where data is stored at selected points in time, and values at other times are recomputed as needed from the last stored state. This allows arbitrarily large adjoint computations with limited memory, at the cost of additional recomputations. In this paper, we combine compression and checkpointing for the first time to compute a realistic seismic inversion. The combination of checkpointing and compression allows larger adjoint computations compared to using only compression, and reduces the recomputation overhead significantly compared to using only checkpointing.},
keywords = {Adjoint-state, FD, checkpointing, compression, HPC, inverse problems},
doi = {10.1007/978-3-030-29400-7_7},
note = {(Euro-Par 2019: Parallel Processing)},
url = {https://slim.gatech.edu/Publications/Public/Journals/PASC/2019/kukreja2019PASCccd/kukreja2019PASCccd.pdf}
}
@ARTICLE{peters2019aos,
author = {Bas Peters and Felix J. Herrmann},
title = {Algorithms and software for projections onto intersections of convex
and non-convex sets with applications to inverse problems},
journal = {arXiv e-prints},
year = {2019},
month = {03},
abstract = {We propose algorithms and software for computing projections onto the intersection of multiple convex and non-convex constraint sets. The software package, called SetIntersectionProjection, is intended for the regularization of inverse problems in physical parameter estimation and image processing. The primary design criterion is working with multiple sets, which allows us to solve inverse problems with multiple pieces of prior knowledge. Our algorithms outperform the well-known Dykstra's algorithm when individual sets are not easy to project onto because we exploit similarities between constraint sets. Other design choices that make the software fast and practical to use include recently developed automatic selection methods for auxiliary algorithm parameters, fine- and coarse-grained parallelism, and a multilevel acceleration scheme. We provide implementation details and examples that show how the software can be used to regularize inverse problems. Results show that we benefit from working with all available prior information and are not limited to one or two regularizers because of algorithmic, computational, or hyper-parameter selection issues.},
keywords = {software, algorithm},
eprint={1902.09699},
note = {(arXiv)},
url = {http://arxiv.org/abs/1902.09699}
}
@ARTICLE{peters2019gms,
author = {Bas Peters and Felix J. Herrmann},
title = {Generalized Minkowski sets for the regularization of inverse problems},
journal = {arXiv e-prints},
year = {2019},
month = {03},
abstract = {Many works on inverse problems in the imaging sciences consider regularization via one or more penalty
functions or constraint sets. When the models/images are not easily described using one or a few penalty
functions/constraints, additive model descriptions for regularization lead to better imaging results. These
include cartoon-texture decomposition, morphological component analysis, and robust principal component
analysis; methods that typically rely on penalty functions. We propose a regularization framework, based
on the Minkowski set, that merges the strengths of additive models and constrained formulations. We
generalize the Minkowski set, such that the model parameters are the sum of two components, each of
which is constrained to an intersection of sets. Furthermore, the sum of the components is also an element
of another intersection of sets. These generalizations allow us to include multiple pieces of prior knowledge
on each of the components, as well as on the sum of components, which is necessary to ensure physical
feasibility of partial-differential-equation-based parameter estimation problems. We derive the projection
operation onto the generalized Minkowski sets and construct an algorithm based on the alternating
direction method of multipliers. We illustrate how we benefit from using more prior knowledge in the
form of the generalized Minkowski set using seismic waveform inversion and video background-anomaly
separation.},
keywords = {inverse},
eprint={1903.03942},
note = {(arXiv)},
url = {http://arxiv.org/abs/1903.03942}
}
@ARTICLE{witte2018alf,
author = {Philipp A. Witte and Mathias Louboutin and Navjot Kukreja and Fabio Luporini and Michael Lange and Gerard J. Gorman and Felix J. Herrmann},
title = {A large-scale framework for symbolic implementations of seismic inversion algorithms in Julia},
journal = {Geophysics},
volume = {84},
number = {3},
pages = {F57-F71},
year = {2019},
month = {03},
abstract = {Writing software packages for seismic inversion is a very challenging task, since problems such as full-waveform inversion or least-squares imaging are both algorithmically and computationally demanding due to the large number of unknown parameters and the fact that we are propagating waves over many wavelengths. Software frameworks therefore need to combine both versatility and performance to provide geophysicists with the means and flexibility to implement complex algorithms that scale to exceedingly large 3D problems. Following these principles, we introduce the Julia Devito Inversion framework, an open-source software package in Julia for large-scale seismic modeling and inversion based on Devito, a domain-specific language compiler for automatic code generation. The framework consists of matrix-free linear operators for implementing seismic inversion algorithms that closely resemble the mathematical notation, a flexible and resilient parallelization, and an interface to Devito for generating optimized stencil code to solve the underlying wave equations. In comparison to many manually optimized industry codes written in low-level languages, our software is built on the idea of independent layers of abstractions and user interfaces with symbolic operators, making it possible to manage both the complexity of algorithms and performance optimizations, while preserving modularity, which allows for a level of expressiveness needed to formulate a broad range of wave-equation-based inversion problems. Through a series of numerical examples, we demonstrate that this allows users to implement algorithms for waveform inversion and imaging as simple Julia scripts that scale to large-scale 3D problems, thus providing a truly performant research and production framework.},
keywords = {FWI, LSRTM, modeling, inversion, software},
doi = {10.1190/geo2018-0174.1},
note = {(Geophysics)},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2019/witte2018alf/witte2018alf.pdf}
}
@ARTICLE{witte2018cls,
author = {Philipp A. Witte and Mathias Louboutin and Fabio Luporini and Gerard J. Gorman and Felix J. Herrmann},
title = {Compressive least-squares migration with on-the-fly Fourier transforms},
journal = {Geophysics},
volume = {84},
number = {5},
pages = {R655-R672},
year = {2019},
month = {08},
abstract = {Least-squares reverse-time migration is a powerful approach for true-amplitude seismic imaging of complex geological structures, but the successful application of this method is currently hindered by its enormous computational cost, as well as high memory requirements for computing the gradient of the objective function. We tackle these problems by introducing an algorithm for low-cost sparsity-promoting least-squares migration using on-the-fly Fourier transforms. We formulate the least-squares migration objective function in the frequency domain and compute gradients for randomized subsets of shot records and frequencies, thus significantly reducing data movement and the overall number of wave-equation solves. By using on-the-fly Fourier transforms, we can compute an arbitrary number of monochromatic frequency-domain wavefields with a time-domain modeling code, instead of having to solve individual Helmholtz equations for each frequency, which quickly becomes computationally infeasible when moving to high frequencies. Our numerical examples demonstrate that compressive imaging with on-the-fly Fourier transforms provides a fast and memory-efficient alternative to time-domain imaging with optimal checkpointing, whose memory requirements for a fixed background model and source wavelet are independent of the number of time steps. Instead, memory and additional computational cost grow with the number of frequencies and determine the amount of subsampling artifacts and crosstalk. In contrast to optimal checkpointing, this offers the possibility to trade both memory and computational cost for image quality or a larger number of iterations and is advantageous in new computing environments such as the cloud, where compute is often cheaper than memory and data movement.},
keywords = {least squares migration, Fourier, sparsity-promotion},
doi = {10.1190/geo2018-0490.1},
note = {(Geophysics)},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2019/witte2018cls/witte2018cls.pdf}
}
@ARTICLE{witte2019ecl,
author = {Philipp A. Witte and Mathias Louboutin and Navjot Kukreja and Fabio Luporini and Michael Lange and Gerard J. Gorman and Felix J. Herrmann},
title = {Geophysics Bright Spots: Efficient coding of large-scale seismic
inversion algorithms},
journal = {The Leading Edge},
volume = {38},
number = {6},
pages = {482-484},
year = {2019},
abstract = {In “A large-scale framework for symbolic implementations of seismic inversion algorithms in Julia,” Witte et al. describe new developments in how to code complex geophysical algorithms in a concise way. Subsurface seismic imaging and parameter estimation are among the most computationally challenging problems in the scientific community. Codes for solving seismic inverse problems, such as FWI or least-squares reverse time migration (LS-RTM), need to be highly optimized, but at the same time, facilitate the implementation of complex optimization algorithms. Traditionally, production-level codes in the oil and gas industry were exclusively written in low-level languages, such as C or Fortran, with extensive amounts of manual performance optimizations, thus making code maintenance, debugging, and adoption of new algorithms prohibitively challenging. Witte et al. present a paradigm of software engineering for seismic inverse problems based on symbolic user interfaces and code generation with automated performance optimization. Inspired by recent deep learning frameworks, the Julia Devito inversion framework (JUDI; an open-source software package) combines high-level abstractions for expressing seismic inversion algorithms with a domain-specific language compiler called Devito for solving the underlying wave equations. Devito's generated code is compiled just in time and outperforms codes with manual performance optimizations. JUDI utilizes Julia's high-level parallelization, making the software easily adaptable to a variety of computing environments such as densely connected HPC clusters or the cloud. The numerical examples (Figure 3) demonstrate the ability to implement a variety of complex algorithms for FWI and LS-RTM in a few lines of Julia code and run it on large-scale 3D models. The paper concludes that abstractions and performance are not mutually exclusive, and use of symbolic user interfaces can facilitate the implementation of new and innovative seismic inversion algorithms.},
keywords = {bright spots, inversion, large-scale, julia},
doi = {10.1190/tle38060482.1},
note = {(The Leading Edge)},
url = {https://library.seg.org/doi/10.1190/tle38060482.1}
}
@ARTICLE{kumar2018toi,
author = {Rajiv Kumar and Marie Graff-Kray and Ivan Vasconcelos and Felix J. Herrmann},
title = {Target-oriented imaging using extended image volumes—a low-rank factorization approach},
journal = {Geophysical Prospecting},
volume = {67},
number = {5},
pages = {1312-1328},
year = {2019},
abstract = {Imaging in geologically challenging environments has led to new developments, including the idea of generating reflection responses by means of interferometric redatuming at a given target datum in the subsurface, when the target datum lies beneath a complex overburden. One way to perform this redatuming is via conventional model-based wave-equation techniques, but those techniques can be computationally expensive for large-scale seismic problems, since the number of wave-equation solves is twice the number of sources involved in the seismic data acquisition. Moreover, conventional shot-profile techniques require large amounts of memory to store full subsurface extended image volumes; therefore, they only form subsurface image volumes in either the horizontal or the vertical direction. We present a randomized singular value decomposition based approach built upon the matrix probing scheme, which takes advantage of the algebraic structure of the extended imaging system. This low-rank representation enables us to overcome both the computational cost associated with the number of wave-equation solves and the memory usage due to explicit storage of full subsurface extended image volumes employed by conventional migration methods. Experimental results on complex geological models demonstrate the efficacy of the proposed methodology and allow practical reflection-based extended imaging for large-scale 5D seismic data.},
keywords = {randomized linear algebra, extended image volumes, target-oriented imaging},
doi = {10.1111/1365-2478.12779},
note = {(Geophysical Prospecting)},
url = {https://slim.gatech.edu/Publications/Public/Journals/GeophysicalProspecting/2019/kumar2018toi/kumar2018toi.html}
}
@ARTICLE{louboutin2018dae,
author = {Mathias Louboutin and Michael Lange and Fabio Luporini and Navjot Kukreja and Philipp A. Witte and Felix J. Herrmann and Paulius Velesko and Gerard J. Gorman},
title = {Devito (v3.1.0): an embedded domain-specific language for finite differences and geophysical exploration},
journal = {Geoscientific Model Development},
year = {2019},
abstract = {We introduce Devito, a new domain-specific language for implementing
high-performance finite difference partial differential equation solvers.
The motivating application is exploration seismology where methods such as
Full-Waveform Inversion and Reverse-Time Migration are used to
invert terabytes of seismic data to create images of the earth's
subsurface. Even using modern supercomputers, it can take weeks to process a
single seismic survey and create a useful subsurface image. The computational cost is dominated by the numerical solution of wave equations and their corresponding adjoints. Therefore, a great deal of effort is invested in aggressively optimizing the performance of these wave-equation propagators for different computer architectures. Additionally, the actual set of partial differential equations being solved and their numerical discretization is under constant innovation as increasingly realistic representations of the physics are developed, further ratcheting up the cost of practical solvers. By embedding a domain-specific language within Python and making heavy use of SymPy, a symbolic mathematics library, we make it possible to develop finite difference simulators quickly using a
syntax that strongly resembles the mathematics. The Devito compiler reads this code and applies a wide range of analyses to generate highly optimized and parallel code. This approach can reduce the development time of a verified and optimized solver from months to days.},
keywords = {wave-equation, modeling, finite-differences, HPC},
doi = {10.5194/gmd-12-1165-2019},
note = {(Geoscientific Model Development)},
url = {https://slim.gatech.edu/Publications/Public/Journals/GMD/2019/louboutin2018dae/louboutin2018dae.pdf}
}
@ARTICLE{silva2018alr,
author = {Curt Da Silva and Yiming Zhang and Rajiv Kumar and Felix J. Herrmann},
title = {Applications of low-rank compressed seismic data to full waveform inversion and extended image volumes},
journal = {Geophysics},
year = {2019},
abstract = {Conventional oil and gas fields are increasingly difficult to explore and image, resulting in the call for more complex wave-equation-based inversion algorithms that require dense long-offset samplings. Consequently, there is an exponential growth in the size of data volumes and prohibitive demands on computational resources. In this work, we propose a method to compress and process seismic data directly in a low-rank tensor format, which drastically reduces the amount of storage required to represent the data. We first outline how seismic data exhibits low-rank structure in a particular transform-domain, which can be exploited to compress the dense data in an extremely storage-efficient tensor format when the data is fully sampled. In the more realistic case of missing data, we can use efficient interpolation techniques to approximate the fully sampled volume in compressed form. In either case, once we have our data represented in its compressed tensor form, we design an algorithm to extract source or receiver gathers directly from the compressed parameters. This extraction process can be done on-the-fly directly on the compressed data, in a full waveform inversion context, and does not require scanning through the entire dataset in order to form shot gathers. To the best of our knowledge, this work is one of the first major contributions to working with seismic data directly in the compressed domain without reconstructing the entire data volume. We use a stochastic inversion approach, which works with small subsets of source experiments at each iteration, further reducing the computational and memory costs of full waveform inversion. We also demonstrate how this data compression and extraction technique can be applied to forming full subsurface image gathers through probing techniques.},
keywords = {low rank, tensor, common shot/receiver gather, full waveform inversion, extended image volume},
doi = {10.1190/geo2018-0116.1},
note = {Accepted on January 22, 2019.},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2019/silva2018alr/silva2018alr.pdf}
}
@ARTICLE{dasilva2017uls,
author = {Curt Da Silva and Felix J. Herrmann},
title = {A unified {2D/3D} large-scale software environment for nonlinear inverse problems},
journal = {ACM Transactions on Mathematical Software},
year = {2019},
abstract = {Large scale parameter estimation problems are some of
the most computationally demanding problems. An
academic researcher's domain-specific knowledge
often precludes that of software design, which
results in software frameworks for inversion that
are technically correct, but not scalable to
realistically-sized problems. On the other hand, the computational demands of realistic problems result in industrial codebases that are geared solely toward performance, rather than
comprehensibility or flexibility. We propose a new
software design that bridges the gap between these
two seemingly disparate worlds. A hierarchical and
modular design allows a user to delve into as much
detail as she desires, while using high performance
primitives at the lower levels. Our code has the
added benefit of actually reflecting the underlying
mathematics of the problem, which lowers the cognitive load on the user and reduces the initial startup period before a researcher can be
fully productive. We also introduce a new
preconditioner for the Helmholtz equation that is
suitable for fault-tolerant distributed
systems. Numerical experiments on a variety of 2D
and 3D test problems demonstrate the effectiveness
of this approach on scaling algorithms from small to
large scale problems with minimal code changes.},
keywords = {optimization, PDE-constrained inversion, large scale, matlab},
note = {Accepted on January 27, 2019.},
url = {https://slim.gatech.edu/Publications/Public/Journals/ACMTOMS/2019/dasilva2017uls/dasilva2017uls.html}
}
%----- 2018 -----%
@ARTICLE{liu2018ssi,
author = {Michelle Liu and Rajiv Kumar and Eldad Haber and Aleksandr Y. Aravkin},
title = {Simultaneous-shot inversion for PDE-constrained optimization problems with missing data},
journal = {Inverse Problems},
volume = {35},
number = {2},
pages = {025003},
year = {2018},
abstract = {Stochastic optimization is key to efficient inversion in
PDE-constrained optimization. Using `simultaneous shots', or random
superposition of source terms, works very well in simple acquisition
geometries where all sources see all receivers, but this rarely occurs in practice. We
develop an approach that interpolates data to an ideal acquisition geometry
while solving the inverse problem using simultaneous shots. The approach is
formulated as a joint inverse problem, combining ideas from low-rank
interpolation with full-waveform inversion. Results using synthetic
experiments illustrate the flexibility and efficiency of the approach.},
keywords = {Optimization, low-rank interpolation, full-waveform inversion},
doi = {10.1088/1361-6420/aaf317},
note = {(Inverse Problems)},
url = {https://slim.gatech.edu/Publications/Public/Journals/InverseProblems/2018/liu2018ssi/liu2018ssi.pdf}
}
@ARTICLE{fang2017sewri,
author = {Zhilong Fang and Rongrong Wang and Felix J. Herrmann},
title = {Source estimation for wavefield-reconstruction inversion},
journal = {Geophysics},
volume = {83},
number = {4},
pages = {R345-R359},
year = {2018},
abstract = {Source estimation is essential to all wave-equation-based seismic inversions, including full-waveform inversion and the recently proposed wavefield-reconstruction inversion. When the source estimation is inaccurate, errors will propagate into the predicted data and introduce additional data misfit. As a consequence, inversion results that minimize this data misfit may become erroneous. To mitigate the errors introduced by incorrect pre-estimated sources, an embedded procedure that updates sources along with medium parameters is necessary for the inversion. So far, such a procedure is still missing in the context of wavefield-reconstruction inversion, a method that is, in many situations, less prone to local minima related to the so-called cycle skipping, compared to full-waveform inversion through exact data-fitting. While wavefield-reconstruction inversion indeed helps to mitigate issues related to cycle skipping by extending the search space with wavefields as auxiliary variables, it relies on having access to the correct source functions. In this paper, we remove the requirement of having accurate source functions by proposing a source estimation technique specifically designed for wavefield-reconstruction inversion. To achieve this task, we consider the source functions as unknown variables and arrive at an objective function that depends on the medium parameters, wavefields, and source functions. During each iteration, we apply the so-called variable projection method to simultaneously project out the source functions and wavefields. After the projection, we obtain a reduced objective function that only depends on the medium parameters and invert for the unknown medium parameters by minimizing this reduced objective. Numerical experiments illustrate that this approach can produce accurate estimates of the unknown medium parameters without any prior information about the source functions.},
keywords = {WRI, source, estimation, variable projection, FWI},
doi = {10.1190/geo2017-0700.1},
note = {(Geophysics)},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2018/fang2017sewri/fang2017sewri.html}
}
@ARTICLE{fang2017uqfip,
author = {Zhilong Fang and Curt Da Silva and Rachel Kuske and Felix J. Herrmann},
title = {Uncertainty quantification for inverse problems with weak partial-differential-equation constraints},
journal = {Geophysics},
volume = {83},
number = {6},
pages = {R629-R647},
year = {2018},
abstract = { In a statistical inverse problem, the objective is a complete
statistical description of unknown parameters from noisy observations in order to quantify uncertainties of the parameters of interest. We consider inverse problems with partial-differential-equation-constraints, which are applicable to
a variety of seismic problems. Bayesian inference is one of the most widely-used approaches to precisely quantify statistics through a posterior distribution, incorporating uncertainties in observed data, modeling kernel, and prior knowledge of the parameters. Typically when formulating the posterior distribution, the partial-differential-equation-constraints are required to be exactly satisfied, resulting in a highly nonlinear forward map and a posterior distribution with many local maxima. These drawbacks make it difficult to find an appropriate approximation for the posterior distribution. Another complicating factor is that traditional Markov chain Monte Carlo methods are known to converge slowly for realistically sized problems. In this work, we relax the partial-differential-equation-constraints by introducing an auxiliary variable, which allows for Gaussian deviations in the partial-differential-equations. Thus, we obtain a new bilinear posterior distribution consisting of both data and partial-differential-equation misfit terms. We illustrate that for a particular range of variance choices for the partial-differential-equation misfit term, the new posterior distribution has fewer modes and can be well-approximated by a Gaussian distribution, which can then be sampled in a straightforward manner. Since it is prohibitively expensive to explicitly construct the dense covariance matrix of the Gaussian approximation for intermediate to large-scale problems, we present a method to implicitly construct it, which enables efficient sampling. We apply this framework to two-dimensional seismic inverse problems with 1,800 and 92,455 unknown parameters. The results illustrate that our framework can produce comparable statistical quantities to those produced by conventional Markov chain Monte Carlo type methods while requiring far fewer partial-differential-equation solves, which are the main computational bottlenecks in these problems.},
keywords = {UQ, FWI, acoustic, weak-constraint},
doi = {10.1190/geo2017-0824.1},
note = {(Geophysics)},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2018/fang2017uqfip/fang2017uqfip.html}
}
@ARTICLE{peters2018pmf,
author = {Bas Peters and Brendan R. Smithyman and Felix J. Herrmann},
title = {Projection methods and applications for seismic nonlinear inverse problems with multiple constraints},
journal = {Geophysics},
volume = {84},
number = {2},
pages = {R251-R269},
year = {2018},
month = {02},
abstract = {Nonlinear inverse problems are often hampered by non-uniqueness and local minima because of missing low frequencies and far offsets in the data, lack of access to good starting models, noise, and modeling errors. A well-known approach to counter these deficiencies is to include prior information on the unknown model, which regularizes the inverse problem. While conventional regularization methods have resulted in enormous progress in ill-posed (geophysical) inverse problems, challenges remain when the prior information consists of multiple pieces. To handle this situation, we propose an optimization framework that allows us to add multiple pieces of prior information in the form of constraints. Compared to additive regularization penalties, constraints have a number of advantages making them more suitable for inverse problems such as full-waveform inversion. The proposed framework is rigorous because it offers assurances that multiple constraints are imposed uniquely at each iteration, irrespective of the order in which they are invoked. To project onto the intersection of multiple sets uniquely, we employ Dykstra's algorithm, which scales to large problems and does not rely on trade-off parameters. In that sense, our approach differs substantially from approaches such as Tikhonov regularization, penalty methods, and gradient filtering. None of these offer assurances, which makes them less suitable for full-waveform inversion, where unrealistic intermediate results effectively derail the iterative inversion process. By working with intersections of sets, we keep expensive objective and gradient calculations unaltered and separate from projections, and we also avoid trade-off parameters. These features allow for easy integration into existing code bases. In addition to more predictable behavior, working with constraints also allows for heuristics where we build up the complexity of the model gradually by relaxing the constraints. This strategy helps to avoid convergence to local minima that represent unrealistic models. We illustrate this unique feature with examples of varying complexity.},
keywords = {full-waveform inversion, optimization, constraints, regularization, projection, intersection},
doi = {10.1190/geo2018-0192.1},
note = {(Geophysics)},
url = {https://slim.gatech.edu/Publications/Public/Journals/Geophysics/2018/peters2018pmf/peters2018pmf.html}
}
@ARTICLE{sharan2018fsp,
author = {Shashin Sharan and Rongrong Wang and Felix J. Herrmann},
title = {Fast sparsity-promoting microseismic source estimation},
journal = {Geophysical Journal International},
volume = {216},
number = {1},
pages = {164-181},
year = {2019},
month = {01},
abstract = {Microseismic events are generated during hydraulic fracturing of unconventional reservoirs and carry information on fracture locations and the origin times associated with these microseismic events. For drilling purposes and to prevent hazardous situations, we need accurate knowledge of the fracture locations, as well as of their size and density. Because microseismic waves can travel far distances, microseismic data collected at the surface and/or in boreholes can help us to monitor hydraulic fracturing. While so-called back-propagation or time-reversal methods are able to focus recorded energy back onto the sources when a reasonable velocity model is available, these methods suffer from blurring, especially in situations where the data acquisition suffers from lack of aperture, sparse sampling, and noise. As a result, these methods typically cannot resolve sources in close proximity, a desired feature, since we need this information if we want to follow the fracture evolution in space and time. In that situation, we need to estimate the locations and the associated source-time functions for closely spaced microseismic sources along the active fractures. To overcome the limitations of time-reversal methods, we propose a wave-equation-based inversion approach where we invert for the complete source wavefield in both space and time. By promoting sparsity on the source wavefield in space, we negate the effects of non-radiating sources during the inversion and obtain high-resolution intensity plots and high-fidelity estimates for the source-time functions. We obtain these results relatively fast by accelerating the linearized Bregman method with a dual formulation. Through experiments, we demonstrate that our method is computationally feasible, robust to noise, and works for closely spaced sources with overlapping source-time functions in complex geological settings.},
keywords = {Waveform inversion, Joint inversion, induced seismicity},
doi = {10.1093/gji/ggy415},
note = {(Geophysical Journal International)},
url = {https://slim.gatech.edu/Publications/Public/Journals/GeophysicalJournalInternational/2018/sharan2018fsp/sharan2018fsp.pdf}
}
@ARTICLE{witte2018fwip3,
author = {Philipp A. Witte and Mathias Louboutin and Keegan Lensink and Michael Lange and Navjot Kukreja and Fabio Luporini and Gerard Gorman and Felix J. Herrmann},
title = {Full-Waveform Inversion - Part 3: optimization},
journal = {The Leading Edge},
volume = {37},
number = {2},
pages = {142-145},
year = {2018},
month = {01},
abstract = {This tutorial is the third part of a full-waveform inversion (FWI) tutorial series with a step-by-step walkthrough of setting up forward and adjoint wave equations and building a basic FWI inversion framework. For discretizing and solving wave equations, we use Devito, a Python-based domain-specific language for automated generation of finite-difference code (Lange et al., 2016). The first two parts of this tutorial (Louboutin et al., 2017, 2018) demonstrated how to solve the acoustic wave equation for modeling seismic shot records and how to compute the gradient of the FWI objective function using the adjoint-state method. With these two key ingredients, we will now build an inversion framework that can be used to minimize the FWI least-squares objective function.},
keywords = {devito, finite-differences, FWI, Modeling, tutorial, inversion},
doi = {10.1190/tle37020142.1},
note = {(The Leading Edge)},
url = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2018/witte2018fwip3/witte2018fwip3.html}
}
%----- 2017 -----%
@ARTICLE{kumar2017hrc,
author = {Rajiv Kumar and Haneet Wason and Shashin Sharan and Felix J. Herrmann},
title = {Highly repeatable {3D} compressive full-azimuth towed-streamer time-lapse acquisition --- a numerical feasibility study at scale},
journal = {The Leading Edge},
volume = {36},
number = {8},
pages = {677-687},
year = {2017},
month = {08},
abstract = {Most conventional 3D time-lapse (or 4D) acquisitions are ocean-bottom cable (OBC) or ocean-bottom node (OBN) surveys, since these surveys are relatively easy to replicate compared to towed-streamer surveys. To attain high degrees of repeatability, survey replicability and dense periodic sampling have become the norm for 4D surveys, which renders this technology expensive. Conventional towed-streamer acquisitions suffer from limited illumination of the subsurface due to narrow azimuths. Although acquisition techniques such as multi-azimuth, wide-azimuth, and rich-azimuth acquisition have been developed to illuminate the subsurface from all possible angles, these techniques can be prohibitively expensive for densely sampled surveys. This leads to uneven sampling, i.e., dense receiver and coarse source sampling or vice versa, in order to make these acquisitions more affordable. Motivated by the design principles of Compressive Sensing (CS), we acquire economic, randomly subsampled (or compressive) and simultaneous towed-streamer time-lapse data without the need to replicate the surveys. We recover densely sampled time-lapse data on one and the same periodic grid by using a joint-recovery model (JRM) that exploits shared information among different time-lapse recordings, coupled with a computationally cheap and scalable rank-minimization technique. The acquisition is low cost since we have subsampled measurements (about 70\% subsampled), simulated with a simultaneous long-offset acquisition configuration of two source vessels travelling across a survey area at random azimuths. We analyze the performance of our proposed compressive acquisition and subsequent recovery strategy by conducting a synthetic, at-scale seismic experiment on a 3D time-lapse model containing geological features such as channel systems, dipping and faulted beds, unconformities, and a gas cloud. Our findings indicate that the insistence on replicability between surveys and the need for OBC/OBN 4D surveys can, perhaps, be relaxed. Moreover, this is a natural next step beyond the successful CS acquisition examples discussed in this special issue.},
keywords = {time-lapse seismic, marine, 3D, simultaneous long offset, CS, rank minimization},
doi = {10.1190/tle36080677.1},
note = {(The Leading Edge)},
url = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2017/kumar2017hrc/kumar2017hrc.html}
}
@ARTICLE{louboutin2017fwi,
author = {Mathias Louboutin and Philipp A. Witte and Michael Lange and Navjot Kukreja and Fabio Luporini and Gerard Gorman and Felix J. Herrmann},
title = {Full-Waveform Inversion - Part 1: forward modeling},
journal = {The Leading Edge},
volume = {36},
number = {12},
pages = {1033-1036},
year = {2017},
month = {12},
abstract = {Since its re-introduction by Pratt (1999), full-waveform inversion (FWI) has gained a lot of attention in geophysical exploration because of its ability to build high-resolution velocity models more or less automatically in areas of complex geology. While there is an extensive and growing literature on the topic, publications focus mostly on technical aspects, making the topic inaccessible to a broader audience due to the lack of simple introductory resources for newcomers to geophysics. We aim to fill this gap by providing a hands-on walkthrough of FWI using Devito (Lange et al., 2016), a system based on domain-specific languages that automatically generates code for time-domain finite differences.},
keywords = {finite-differences, devito, tutorial, FWI, modeling},
doi = {10.1190/tle36121033.1},
note = {(The Leading Edge)},
url = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2017/louboutin2017fwi/louboutin2017fwi.html}
}
@ARTICLE{louboutin2017fwip2,
author = {Mathias Louboutin and Philipp A. Witte and Michael Lange and Navjot Kukreja and Fabio Luporini and Gerard Gorman and Felix J. Herrmann},
title = {Full-Waveform Inversion - Part 2: adjoint modeling},
journal = {The Leading Edge},
volume = {37},
number = {1},
pages = {69-72},
year = {2018},
month = {01},
abstract = {This tutorial is the second part of a three-part tutorial series on full-waveform inversion (FWI), in which we provide a step-by-step walkthrough of setting up forward and adjoint wave-equation solvers and an optimization framework for inversion. In part 1 (Louboutin et al., 2017), we demonstrated how to discretize the acoustic wave equation and how to set up a basic forward modeling scheme using Devito, a domain-specific language (DSL) in Python for automated finite-difference (FD) computations (Lange et al., 2016). Devito allows us to define wave equations as symbolic Python expressions (Meurer et al., 2017), from which optimized FD stencil code is automatically generated at run time. In part 1, we showed how to use Devito to set up and solve acoustic wave equations with (impulsive) seismic sources and how to sample wavefields at the receiver locations to model shot records.},
keywords = {tutorial, devito, finite-difference, acoustic, FWI},
doi = {10.1190/tle37010069.1},
note = {(The Leading Edge)},
url = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2018/louboutin2017fwip2/louboutin2017fwip2.html}
}
@ARTICLE{oghenekohwo2017hrt,
author = {Felix Oghenekohwo and Felix J. Herrmann},
title = {Highly repeatable time-lapse seismic with distributed {Compressive} {Sensing}---mitigating effects of calibration errors},
journal = {The Leading Edge},
year = {2017},
month = {08},
volume = {36},
number = {8},
pages = {688-694},
abstract = {Recently, we demonstrated that combining joint recovery with low-cost non-replicated randomized sampling tailored to time-lapse seismic can give us access to high-fidelity, highly repeatable, dense prestack vintages and high-grade time-lapse data. To arrive at this result, we assumed well-calibrated surveys---i.e., we presumed accurate post-plot source/receiver positions. Unfortunately, in practice seismic surveys are prone to calibration errors, which are unknown deviations between the actual and post-plot acquisition geometry. By means of synthetic experiments, we analyze the possible impact of these errors on vintages and on time-lapse data obtained with our joint recovery model from compressively sampled surveys. Supported by these experiments, we demonstrate that highly repeatable time-lapse vintages are attainable despite the presence of unknown calibration errors in the positions of the shots. We assess the repeatability quantitatively for two scenarios by studying the impact of calibration errors on conventional dense but irregularly sampled surveys and on low-cost compressed surveys. To separate time-lapse effects from calibration issues, we consider the idealized case where the subsurface remains unchanged and the practical situation where time-lapse changes are restricted to a subset of the data. In both cases, the quality of the recovered vintages and time-lapse decreases gracefully for low-cost compressed surveys with increasing calibration errors. Conversely, the quality of vintages from expensive, densely and periodically sampled surveys decreases more rapidly as unknown and difficult-to-control calibration errors increase.},
keywords = {time-lapse seismic, marine, random sampling, calibration errors, joint-recovery method},
doi = {10.1190/tle36080688.1},
note = {(The Leading Edge)},
url = {https://slim.gatech.edu/Publications/Public/Journals/TheLeadingEdge/2017/oghenekohwo2017hrt/oghenekohwo2017hrt.html}
}
@ARTICLE{kumar2016bls,
author = {Rajiv Kumar and Oscar Lopez and Damek Davis and Aleksandr Y. Aravkin and Felix J. Herrmann},
title = {Beating level-set methods for {5D} seismic data interpolation: a primal-dual alternating approach},
journal = {IEEE Transactions on Computational Imaging},
year = {2017},
month = {04},
abstract = {Acquisition cost is a crucial bottleneck for seismic workflows, and low-rank formulations for data interpolation allow practitioners to 'fill in' data volumes from critically subsampled data acquired in the field. The tremendous size of the seismic data volumes required for seismic processing remains a major challenge for these techniques. Residual-constrained formulations require less parameter tuning when the target noise floor is known. We propose a new approach to solve residual-constrained formulations for interpolation. We represent the data volume in a compressed manner using low-rank matrix factors, and build a block-coordinate algorithm with constrained convex subproblems that are solved with a primal-dual splitting scheme. The developed optimization framework works on whole seismic temporal-frequency slices and does not require windowing or non-trivial sorting of the seismic data. The new approach is competitive with state-of-the-art level-set algorithms that interchange the role of objectives with constraints. We use the new algorithm to successfully interpolate a large-scale 5D seismic data volume (up to $10^{10}$ data points), generated from the geologically complex synthetic 3D Compass velocity model, where 80\% of the data has been removed. We also develop a robust extension of the primal-dual approach to deal with outliers (or noise) in the data.},
keywords = {matrix completion, seismic data, seismic trace interpolation, alternating minimization, primal-dual splitting},
note = {(published online in IEEE Transactions on Computational Imaging)},
doi = {10.1109/TCI.2017.2693966},
url = {https://slim.gatech.edu/Publications/Public/Journals/IEEETransComputationalImaging/2017/kumar2016bls/kumar2016bls.pdf}
}
@ARTICLE{louboutin2016ppf,
author = {Mathias Louboutin and Michael Lange and Felix J. Herrmann and Navjot Kukreja and Gerard Gorman},
title = {Performance prediction of finite-difference solvers for different computer architectures},
journal = {Computers \& Geosciences},
year = {2017},
month = {08},
volume = {105},
pages = {148-157},
abstract = {The life-cycle of a partial differential equation (PDE)
solver is often characterized by three development
phases: the development of a stable numerical
discretization; development of a correct (verified)
implementation; and the optimization of the
implementation for different computer
architectures. Often it is only after significant
time and effort has been invested that the
performance bottlenecks of a PDE solver are fully understood, and the precise details vary between different computer architectures. One way to
mitigate this issue is to establish a reliable
performance model that allows a numerical analyst to
make reliable predictions of how well a numerical
method would perform on a given computer
architecture, before embarking upon potentially long
and expensive implementation and optimization
phases. The availability of a reliable performance
model also saves developer effort as it both informs