% This file was created with JabRef 2.9.
% Encoding: MacRoman
@MASTERSTHESIS{zhang2018THlss,
author = {Yiming Zhang},
title = {Large-scale seismic data compression: application to full waveform inversion and extended image volume},
school = {The University of British Columbia},
year = {2018},
month = {04},
address = {Vancouver},
abstract = {Conventional oil and gas fields are increasingly difficult to explore and image, resulting in a call for more complex wave-equation based inversion algorithms that require dense long-offset samplings. Consequently, there is an exponential growth in the size of data volumes and prohibitive demands on computational resources. In this work, we propose a method to compress and process seismic data directly in a low-rank tensor format, which drastically reduces the amount of storage required to represent the data. We first outline how seismic data exhibits low-rank structure in a particular transform-domain, which can be exploited to compress the dense data in one extremely storage-efficient tensor format when the data is fully sampled. In the more realistic case of missing data, we can use interpolation techniques based on the same tensor format to recover the fully sampled data volume in compressed form. In either case, once we have our data represented in its compressed tensor form, we design an algorithm to extract source or receiver gathers directly from the compressed parameters. This extraction process can be done on-the-fly directly on the compressed data, in the full waveform inversion context, and does not require scanning through the entire dataset in order to form shot gathers. To the best of our knowledge, this work is one of the first major contributions to working with seismic data applications directly in the compressed domain without reconstructing the entire data volume. We use a stochastic inversion approach, which works with small subsets of source experiments at each iteration, to further reduce the computational and memory costs of full waveform inversion. We also demonstrate how this data compression and extraction technique can be applied to forming full subsurface image gathers through probing techniques.},
keywords = {MSc, thesis, compression, FWI, extended image volume, on-the-fly, multilinear algebra, Hierarchical Tucker},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2018/zhang2018THlss/zhang2018THlss.pdf},
presentation = {https://slim.gatech.edu/Publications/Public/Thesis/2018/zhang2018THlss/zhang2018THlss_pres.pdf}
}
@MASTERSTHESIS{bougher2016THmla,
author = {Ben B. Bougher},
title = {Machine learning applications to geophysical data analysis},
school = {The University of British Columbia},
year = {2016},
month = {08},
address = {Vancouver},
abstract = {The sedimentary layers of the Earth are a complex
amorphous material formed from chaotic, turbulent,
and random natural processes. Exploration
geophysicists use a combination of assumptions,
approximate physical models, and trained pattern
recognition to extract useful information from
complex remote sensing data such as seismic and well
logs. In this thesis I investigate supervised and
unsupervised machine learning models in geophysical
data analysis and present two novel applications to
exploration geophysics. Firstly, interpreted well
logs from the Trenton-Black River study are used to
train a classifier that results in a success rate of
67\% at predicting stratigraphic units from gamma
ray logs. I use the scattering transform, a
multiscale analysis transform, to extract
discriminating features to feed a K-nearest
neighbour classifier. A second experiment frames a
conventional pre-stack seismic data characterization
workflow as an unsupervised machine learning problem
that is free from physical
assumptions. Conventionally, the Shuey model is used
to fit the angle dependent reflectivity response of
seismic data. I instead use principal component
based approaches to learn projections from the data
that improve classification. Results on the Marmousi
II elastic model and an industry field dataset show
that unsupervised learning models can be effective
at segmenting hydrocarbon reservoirs from seismic
data.},
keywords = {MSc, thesis, machine learning, PCA, AVA, well logs, scattering transform},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2016/bougher2016THmla/bougher2016THmla.pdf},
presentation = {https://slim.gatech.edu/Publications/Public/Thesis/2016/bougher2016THmla/bougher2016THmla_pres.pdf}
}
@MASTERSTHESIS{li2015THwmd,
author = {Xiaowei Li},
title = {A weighted $\ell_1$-minimization for distributed compressive sensing},
school = {The University of British Columbia},
year = {2015},
month = {09},
address = {Vancouver},
abstract = {Distributed Compressive Sensing (DCS) studies the
recovery of jointly sparse signals. Compared to
separate recovery, the joint recovery algorithms in
DCS are usually more effective as they make use of
the joint sparsity. In this thesis, we study a
weighted $\ell_1$-minimization algorithm for the joint
sparsity model JSM-1 proposed by Baron et al. Our
analysis gives a sufficient null space property for
the joint sparse recovery. Furthermore, this
property can be extended to stable and robust
settings. We also present some numerical
experiments for this algorithm.},
keywords = {MSc, thesis, weighted $\ell_1$, distributed compressive sensing},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2015/li2015THwmd/li2015THwmd.pdf}
}
@MASTERSTHESIS{hargreaves2014THssr,
author = {Brock Hargreaves},
title = {Sparse signal recovery: analysis and synthesis formulations with prior support information},
school = {The University of British Columbia},
year = {2014},
month = {04},
address = {Vancouver},
abstract = {The synthesis model for signal recovery has been the
model of choice for many years in compressive
sensing. Various weighting schemes using prior
support information to adjust the objective function
associated with the synthesis model have been shown
to improve the recovery of the signal in terms of
accuracy. Generally, even with no prior knowledge of
the support, iterative methods can build support
estimates and incorporate that into the recovery
which has also been shown to increase the speed and
accuracy of the recovery. However, when the original
signal is sparse with respect to a redundant
dictionary (rather than an orthonormal basis) there
is a counterpart model to synthesis, namely the
analysis model, which has been less popular but has
recently attracted more attention. The analysis
model is much less understood and thus there are
fewer theorems available in both the context of
non-weighted and weighted signal recovery. In this
thesis, we investigate weighting in both the
analysis model and synthesis model in weighted
$\ell_1$-minimization. Theoretical guarantees on
reconstruction and various weighting strategies for
each model are discussed. We give conditions for
weighted synthesis recovery with frames which do not
require strict incoherency conditions; this is based
on recent results of regular synthesis with frames
using optimal dual $\ell_1$ analysis. A novel
weighting technique is introduced in the analysis
case which outperforms its traditional counterparts
in the case of seismic wavefield reconstruction. We
also introduce a weighted split Bregman algorithm
for analysis and optimal dual analysis. We then
investigate these techniques on seismic data and
synthetically created test data using a variety of
frames.},
keywords = {MSc, thesis, sparse, analysis, synthesis, weighted $\ell_1$},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2014/hargreaves2014THssr/hargreaves2014THssr.pdf}
}
@MASTERSTHESIS{petrenko2014THaih,
author = {Art Petrenko},
title = {Accelerating an iterative {Helmholtz} solver using reconfigurable hardware},
school = {The University of British Columbia},
year = {2014},
month = {04},
address = {Vancouver},
abstract = {An implementation of seismic wave simulation on a
platform consisting of a conventional host processor
and a reconfigurable hardware accelerator is
presented. This research is important in the field
of exploration for oil and gas resources, where a 3D
model of the subsurface of the Earth is frequently
required. By comparing seismic data collected in a
real-world survey with synthetic data generated by
simulated waves, it is possible to deduce such a
model. However this requires many time-consuming
simulations with different Earth models to find the
one that best fits the measured data. Speeding up
the wave simulations would allow more models to be
tried, yielding a more accurate estimate of the
subsurface. The reconfigurable hardware accelerator
employed in this work is a field programmable gate
array (FPGA). FPGAs are computer chips that consist
of electronic building blocks that the user can
configure and reconfigure to represent their
algorithm in hardware. Whereas a traditional
processor can be viewed as a pipeline for processing
instructions, an FPGA is a pipeline for processing
data. The chief advantage of the FPGA is that all
the instructions in the algorithm are already
hardwired onto the chip. This means that execution
time depends only on the amount of data to be
processed, and not on the complexity of the
algorithm. The main contribution is an
implementation of the well-known Kaczmarz row
projection algorithm on the FPGA, using techniques
of dataflow programming. This kernel is used as the
preconditioning step of CGMN, a modified version of
the conjugate gradients method that is used to solve
the time-harmonic acoustic isotropic constant
density wave equation. Using one FPGA-based
accelerator, the current implementation allows
seismic wave simulations to run more than twice
as fast compared to running on one Intel Xeon
E5-2670 core. I also discuss the effect of
modifications of the algorithm necessitated by the
hardware on the convergence properties of CGMN.
Finally, a specific plan for future work is set out
in order to fully exploit the accelerator platform,
and the work is set in its larger context.},
keywords = {CG, CGMN, FPGA, Helmholtz, Kaczmarz, linear solver, Maxeler, MSc, thesis, wave equation},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2014/petrenko2014THaih/petrenko2014THaih.pdf},
presentation = {https://slim.gatech.edu/Publications/Public/Thesis/2014/petrenko2014THaih/petrenko2014THaih_pres.pdf}
}
@MASTERSTHESIS{miao2014THesi,
author = {Lina Miao},
title = {Efficient seismic imaging with spectral projector and joint sparsity},
school = {The University of British Columbia},
year = {2014},
month = {03},
address = {Vancouver},
abstract = {In this thesis, we investigate the potential of
improving the efficiency of seismic imaging with two
advanced techniques: the spectral projector and the
'joint sparsity'. The spectral projector offers an
eigenvalue decomposition free computation routine
that can filter out unstable evanescent wave
components during wave equation based depth
extrapolation. 'Joint sparsity' aims to improve on
the pure sparsity promoting recovery by making use
of additional structure information of the
signal. In addition, a new sparsity optimization
algorithm, PQNL1, is proposed to improve both the
theoretical convergence rate and the practical
performance for extremely large seismic imaging
problems.},
keywords = {MSc, thesis},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2014/miao2014THesi/miao2014THesi.pdf},
presentation = {https://slim.gatech.edu/Publications/Public/Thesis/2014/miao2014THesi/miao2014THesi_pres.pdf}
}
@MASTERSTHESIS{ghadermarzy2013THups,
author = {Navid Ghadermarzy},
title = {Using prior support information in compressed sensing},
school = {The University of British Columbia},
year = {2013},
month = {08},
address = {Vancouver},
abstract = {Compressed sensing is a data acquisition technique that
entails recovering estimates of sparse and
compressible signals from $n$ linear measurements,
significantly fewer than the signal ambient
dimension $N$. In this thesis we show how we can
reduce the required number of measurements even
further if we incorporate prior information about
the signal into the reconstruction
algorithm. Specifically, we study certain weighted
nonconvex $\ell_p$ minimization algorithms and a
weighted approximate message passing algorithm. In
Chapter 1 we describe compressed sensing as a
practicable signal acquisition method in application
and introduce the generic sparse approximation
problem. Then we review some of the algorithms used
in compressed sensing literature and briefly
introduce the method we used to incorporate prior
support information into these problems. In Chapter
2 we derive sufficient conditions for stable and
robust recovery using weighted $\ell_p$ minimization
and show that these conditions are better than those
for recovery by regular $\ell_p$ and weighted
$\ell_1$. We present extensive numerical
experiments, both on synthetic examples and on
audio and seismic signals. In Chapter 3 we derive a
weighted AMP algorithm which iteratively solves the
weighted $\ell_1$ minimization. We also introduce a
reweighting scheme for weighted AMP algorithms which
enhances the recovery performance of weighted
AMP. Finally, we apply these algorithms to synthetic
experiments and to real audio signals.},
keywords = {MSc, thesis, compressed sensing, weighted $\ell_1$},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2013/ghadermarzy2013THups.pdf}
}
@MASTERSTHESIS{johnson2013THswr,
author = {James Johnson},
title = {Seismic wavefield reconstruction using reciprocity},
school = {The University of British Columbia},
year = {2013},
month = {03},
address = {Vancouver},
abstract = {The primary focus of most reflection seismic surveys is
to help locate hydrocarbon resources. Due to an ever
increasing scarcity of these resources, we must
increase the size and quality of our seismic
surveys. However, processing such large seismic data
volumes to accurately recover earth properties is a
painstaking and computationally intensive
process. Due to the way reflection seismic surveys
are conducted there are often holes in the collected
data, where traces are not recorded. This can be due
to physical or cost constraints. For some of the
initial stages of processing these missing traces
are of little consequence. However, processes like
multiple prediction and removal, interferometric
ground roll prediction, and migration require
densely sampled data on a regular grid. Thus the
need to interpolate undersampled data cannot be
ignored. Using the fact that reflection seismic data
sets obey a reciprocal relationship in source and
receiver locations, combined with recent advances in
the field of compressed sensing, we show that,
properly regularized, the wavefield reconstruction
problem can be solved with a high degree of
accuracy. We exploit the compressible nature of
seismic data in the curvelet domain to solve
regularized l1 recovery problems that seek to match
the measured data and enforce the above mentioned
reciprocity. Using our method we were able to
achieve results with a 20.45 dB signal to noise
ratio when reconstructing a marine data set that had
50\% of its traces decimated. This is a 13.44 dB
improvement over using the same method run without
taking reciprocity into account.},
keywords = {MSc, thesis},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2013/johnson2013THswr.pdf}
}
@MASTERSTHESIS{alhashim09THsdp,
author = {Fadhel Abbas Alhashim},
title = {Seismic data processing with the parallel windowed curvelet transform},
school = {The University of British Columbia},
year = {2009},
month = {08},
address = {Vancouver},
abstract = {The process of obtaining high quality seismic images is
very challenging when exploring new areas that have
high complexities. The seismic data to be processed
comes from the field noisy and commonly incomplete.
Recently, major advances were accomplished in the
area of coherent noise removal, for example, Surface
Related Multiple Elimination (SRME). Predictive
multiple elimination methods, such as SRME, consist
of two steps: The first step is the prediction step,
in which multiples are predicted from the
seismic data. The second step is the separation step
in which primary reflections and surface-related
multiples are separated; this involves matching the
predicted multiples from the first step with the
true multiples in the data, which are eventually removed. A
robust Bayesian wavefield separation method
was recently introduced to improve on the
separation by matching methods. This method utilizes
the effectiveness of using the multi scale and multi
angular curvelet transform in processing seismic
images. The method produced excellent results and
improved multiple removal. A considerable problem in
the seismic processing field is the fact that
seismic data are large and require a correspondingly
large memory size and processing time. The fact that
curvelets are redundant also increases the need for
large memory to process seismic data. In this thesis
we propose a parallel approach based on a windowing
operator that divides large seismic data into
smaller, more manageable datasets that can fit in
memory so that it is possible to apply the Bayesian
separation process in parallel with minimal harm to
the image quality and data integrity. However, by
dividing the data, we introduce discontinuities. We
take these discontinuities into account and compare
two ways that different windows may communicate.
The first method is to communicate edge information
at only two steps, namely, data scattering and
gathering processes while applying the multiple
separation on each window separately. The second
method is to define our windowing operator as a
global operator, which exchanges window edge
information at each forward and inverse curvelet
transform. We discuss the trade-off between the two
methods, trying to minimize complexity and I/O time
spent in the process. We test our windowing operator
on a seismic denoising problem and then apply the
windowing operator on our sparse-domain Bayesian
primary-multiple separation.},
keywords = {MSc},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2009/alhashim09THsdp.pdf},
presentation = {https://slim.gatech.edu/Publications/Public/Thesis/2009/alhashim09THsdp_pres.pdf}
}
@MASTERSTHESIS{jumah2012THdre,
author = {Bander Jumah},
title = {Dimensionality-reduced estimation of primaries by sparse inversion},
school = {The University of British Columbia},
year = {2012},
month = {02},
address = {Vancouver},
abstract = {Data-driven methods, such as the estimation of primaries
by sparse inversion, suffer from the curse of
dimensionality that leads to disproportional growth
in computational and storage demands when moving to
realistic 3D field data. To remove this fundamental
impediment, we propose a dimensionality-reduction
technique where the data matrix is approximated
adaptively by a randomized low-rank
factorization. Compared to conventional methods,
which need passes through all the data, possibly
including on-the-fly interpolations, for each
iteration, our approach has the advantage that the
number of passes is reduced to between one and three. In addition, the
low-rank matrix factorization leads to considerable
reductions in storage and computational costs of the
matrix multiplies required by the sparse
inversion. Application of the proposed formalism to
synthetic and real data shows that significant
performance improvements in speed and memory use are
achievable at a low computational overhead required
by the low-rank factorization.},
keywords = {MSc, thesis},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2012/jumah2012THdre.pdf}
}
@MASTERSTHESIS{almatar10THesd,
author = {Mufeed H. AlMatar},
title = {Estimation of surface-free data by curvelet-domain matched filtering and sparse inversion},
school = {The University of British Columbia},
year = {2010},
month = {12},
address = {Vancouver},
abstract = {A recent robust multiple-elimination technique, based on
the underlying principle that relates primary
impulse response to total upgoing wavefield, tries
to change the paradigm that sees surface-related
multiples as noise that needs to be removed from the
data prior to imaging. This technique, estimation of
primaries by sparse inversion (EPSI) (van
Groenestijn and Verschuur, 2009; Lin and Herrmann,
2009), proposes an inversion procedure during which
the source function and surface-free impulse
response are directly calculated from the upgoing
wavefield using an alternating optimization
procedure. EPSI hinges on a delicate interplay
between surface-related multiples and
primaries. Finite aperture and other imperfections may
violate this relationship. In this thesis, we
investigate how to make EPSI more robust by
incorporating curvelet-domain matching in its
formulation. Compared to surface-related multiple
removal (SRME), where curvelet-domain matching was
used successfully, incorporating this step has the
additional advantage that it matches multiples to
multiples rather than predicted multiples to total
data, as in SRME.},
keywords = {MSc},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2010/almatar10THesd.pdf}
}
@MASTERSTHESIS{dupuis05THssc,
author = {Catherine Dupuis},
title = {Seismic singularity characterization with redundant dictionaries},
school = {The University of British Columbia},
year = {2005},
month = {07},
address = {Vancouver},
abstract = {We consider seismic signals as a superposition of
waveforms parameterized by their fractional-
orders. Each waveform models the reflection of a
seismic wave at a particular transition between two
lithological layers in the subsurface. The location
of the waveforms in the seismic signal corresponds
to the depth of the transitions in the subsurface,
whereas their fractional-order constitutes a measure
of the sharpness of the transitions. By considering
fractional-order transitions, we generalize the
zero-order transition model of the conventional
deconvolution problem, and aim at capturing the
different types of transitions. The goal is to
delineate and characterize transitions from seismic
signals by recovering the locations and
fractional-orders of its corresponding
waveforms. This problem has received increasing
interest, and several methods have been proposed,
including multi- and monoscale analysis based on
Mallat{\textquoteright}s wavelet transform modulus
maxima, and seismic atomic decomposition. We propose
a new method based on a two-step approach, which
divides the initial problem of delineating and
characterizing transitions over the whole seismic
signal, into two easier sub-problems. The algorithm
first partitions the seismic signal into its major
components, and then estimates the fractional-orders
and locations of each component. Both steps are
based on the sparse decomposition of seismic signals
in overcomplete dictionaries of waveforms
parameterized by their fractional-orders, and involve
$\ell_1$ minimizations solved by an iterative
thresholding algorithm. We present the method and
show numerical results on both synthetic and real
data.},
keywords = {MSc},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2005/dupuis05THssc.pdf}
}
@MASTERSTHESIS{kumar09THins,
author = {Vishal Kumar},
title = {Incoherent noise suppression and deconvolution using curvelet-domain sparsity},
school = {The University of British Columbia},
year = {2009},
month = {06},
address = {Vancouver},
abstract = {Curvelets are a recently introduced transform domain
that belongs to a family of multiscale and
multidirectional data expansions. As such, curvelets
can be applied to resolve the issues posed by
complicated seismic wavefronts. We make use of this
multiscale, multidirectional and hence sparsifying
ability of the curvelet transform to suppress
incoherent noise from crustal data where the
signal-to-noise ratio is low and to develop an
improved deconvolution procedure. Incoherent noise
present in seismic reflection data corrupts the
quality of the signal and can often lead to
misinterpretation. The curvelet domain lends itself
particularly well for denoising because coherent
seismic energy maps to a relatively small number of
significant curvelet coefficients.},
keywords = {MSc},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2009/kumar09THins.pdf}
}
@MASTERSTHESIS{lebed08THssr,
author = {Evgeniy Lebed},
title = {Sparse signal recovery in a transform domain},
school = {The University of British Columbia},
year = {2008},
month = {08},
address = {Vancouver},
abstract = {The ability to efficiently and sparsely represent
seismic data is becoming an increasingly important
problem in geophysics. Over the last thirty years
many transforms such as wavelets, curvelets,
contourlets, surfacelets, shearlets, and many other
types of x-lets have been developed. Such transforms
were leveraged to resolve this issue of sparse
representations. In this work we compare the
properties of four of these commonly used
transforms, namely the shift-invariant wavelets,
complex wavelets, curvelets and surfacelets. We also
explore the performance of these transforms for the
problem of recovering seismic wavefields from
incomplete measurements.},
keywords = {MSc},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2008/lebed08THssr.pdf}
}
@MASTERSTHESIS{maysami08THlcs,
author = {Mohammad Maysami},
title = {Lithology constraints from seismic waveforms: application to opal-{A} to opal-{CT} transition},
school = {The University of British Columbia},
year = {2008},
month = {02},
address = {Vancouver},
abstract = {In this work, we present a new method for seismic
waveform characterization, which is aimed at
extracting detailed litho-stratigraphical
information from seismic data. We attempt to
estimate the lithological attributes from seismic
data according to our parametric representation of
stratigraphical horizons, where the parameter values
provide us with a direct link to the nature of
lithological transitions. We test our method on a
seismic dataset with a strong diagenetic transition
(opal-A to opal-CT transition). Given some
information from well cutting samples, we use a
percolation-based model to construct the elastic
profile of lithological transitions. Our goal is to
match parametric representation for the diagenetic
transition in both real data and synthetic data
given by these elastic profiles. This match may be
interpreted as a well-seismic tie, which reveals
lithological information about stratigraphical
horizons.},
keywords = {MSc},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2008/maysami08THlcs.pdf}
}
@MASTERSTHESIS{yarham08THsgs,
author = {Carson Yarham},
title = {Seismic ground-roll separation using sparsity promoting $\ell_1$ minimization},
school = {The University of British Columbia},
year = {2008},
month = {05},
address = {Vancouver},
abstract = {The removal of coherent noise generated by surface waves
in land-based seismic data is a prerequisite to imaging
the subsurface. These surface waves, termed
ground roll, overlay important reflector information
in both the t-x and f-k domains. Standard techniques
of ground roll removal commonly alter reflector
information as a consequence of the ground roll
removal. We propose the combined use of the curvelet
domain as a sparsifying basis in which to perform
signal separation techniques that can preserve
reflector information while increasing ground roll
removal. We examine two signal separation
techniques, a block-coordinate relaxation method and
a Bayesian separation method. The derivations and
background for both methods are presented and the
parameter sensitivity is examined. Both methods are
shown to be effective in certain situations
regarding synthetic data and erroneous surface wave
predictions. The block-coordinate relaxation method
is shown to have major weaknesses when dealing with
seismic signal separation in the presence of noise
and with the production of artifacts and reflector
degradation. The Bayesian separation method is shown
to improve overall separation for both synthetic and
real data. The Bayesian separation scheme is used on
a real data set with a surface wave prediction
containing reflector information. It is shown to
improve the signal separation by recovering
reflector information while improving the surface
wave removal. The thesis contains a separate real
data example where both the block-coordinate
relaxation method and the Bayesian separation method
are compared.},
keywords = {MSc},
note = {(MSc)},
url = {https://slim.gatech.edu/Publications/Public/Thesis/2008/yarham08THsgs.pdf}
}
@MASTERSTHESIS{fenelon08msc,
author = {Lloyd Fenelon},
title = {Nonequispaced discrete curvelet transform for seismic data reconstruction},
howpublished = {BSc thesis, Ecole Nationale Superieure de Physique de Strasbourg},
month = {08},
year = {2008},
abstract = {Physical constraints during seismic acquisitions lead to
incomplete seismic datasets. Curvelet Reconstruction
with Sparsity promoting Inversion (CRSI) is one of
the most efficient interpolation methods available to
recover complete datasets from data with missing
traces. The method is built on the
curvelet transform, which is well suited to
processing seismic data. However, its main
shortcoming is that it cannot provide an accurate
result if the data are acquired at irregular
positions. This stems from the curvelet transform
implementation, which cannot handle this type of
data. In this thesis the implementation of the
curvelet transform is modified to allow CRSI to
give a better representation of seismic data for
high-quality seismic imaging.},
bdsk-url-1 = {http://slim.gatech.edu/Publications/Public/Theses/2008/fenelon08msc.pdf},
date-added = {2008-09-03 16:18:08 -0700},
date-modified = {2008-09-03 16:25:10 -0700},
keywords = {SLIM, BSc},
pdf = {http://slim.gatech.edu/Publications/Public/Theses/2008/fenelon08msc.pdf}
}