@article{10.3389/fnins.2013.00267,
abstract = {Magnetoencephalography and electroencephalography (M/EEG) measure the weak electromagnetic signals generated by neuronal activity in the brain. Using these signals to characterize and locate neural activation in the brain is a challenge that requires expertise in physics, signal processing, statistics, and numerical methods. As part of the MNE software suite, MNE-Python is an open-source software package that addresses this challenge by providing state-of-the-art algorithms implemented in Python that cover multiple methods of data preprocessing, source localization, statistical analysis, and estimation of functional connectivity between distributed brain regions. All algorithms and utility functions are implemented in a consistent manner with well-documented interfaces, enabling users to create M/EEG data analysis pipelines by writing Python scripts. Moreover, MNE-Python is tightly integrated with the core Python libraries for scientific computation (NumPy, SciPy) and visualization (matplotlib and Mayavi), as well as the greater neuroimaging ecosystem in Python via the Nibabel package. The code is provided under the new BSD license allowing code reuse, even in commercial products. Although MNE-Python has only been under heavy development for a couple of years, it has rapidly evolved with expanded analysis capabilities and pedagogical tutorials because multiple labs have collaborated during code development to help share best practices. MNE-Python also gives easy access to preprocessed datasets, helping users to get started quickly and facilitating reproducibility of methods by other researchers. Full documentation, including dozens of examples, is available at http://martinos.org/mne.},
author = {Gramfort, Alexandre and Luessi, Martin and Larson, Eric and Engemann, Denis and Strohmeier, Daniel and Brodbeck, Christian and Goj, Roman and Jas, Mainak and Brooks, Teon and Parkkonen, Lauri and H{\"{a}}m{\"{a}}l{\"{a}}inen, Matti},
doi = {10.3389/fnins.2013.00267},
issn = {1662-453X},
journal = {Frontiers in Neuroscience},
pages = {267},
title = {{MEG and EEG data analysis with MNE-Python}},
url = {https://www.frontiersin.org/article/10.3389/fnins.2013.00267},
volume = {7},
year = {2013}
}
@article{Perrin1989,
abstract = {Description of mapping methods using spherical splines, both to interpolate scalp potentials (SPs), and to approximate scalp current densities (SCDs). Compared to a previously published method using thin plate splines, the advantages are a very simple derivation of the SCD approximation, faster computing times, and greater accuracy in areas with few electrodes.},
author = {Perrin, F and Pernier, J and Bertrand, O and Echallier, J F},
doi = {10.1016/0013-4694(89)90180-6},
issn = {0013-4694 (Print)},
journal = {Electroencephalography and Clinical Neurophysiology},
keywords = {Computer Simulation,Electroencephalography,Electrophysiology,Humans,Scalp,Signal Processing, Computer-Assisted,physiology},
language = {eng},
month = feb,
number = {2},
pages = {184--187},
pmid = {2464490},
title = {{Spherical splines for scalp potential and current density mapping}},
volume = {72},
year = {1989}
}
@article{Jas2017,
abstract = {We present an automated algorithm for unified rejection and repair of bad trials in magnetoencephalography (MEG) and electroencephalography (EEG) signals. Our method capitalizes on cross-validation in conjunction with a robust evaluation metric to estimate the optimal peak-to-peak threshold - a quantity commonly used for identifying bad trials in M/EEG. This approach is then extended to a more sophisticated algorithm which estimates this threshold for each sensor yielding trial-wise bad sensors. Depending on the number of bad sensors, the trial is then repaired by interpolation or by excluding it from subsequent analysis. All steps of the algorithm are fully automated thus lending itself to the name Autoreject. In order to assess the practical significance of the algorithm, we conducted extensive validation and comparisons with state-of-the-art methods on four public datasets containing MEG and EEG recordings from more than 200 subjects. The comparisons include purely qualitative efforts as well as quantitatively benchmarking against human supervised and semi-automated preprocessing pipelines. The algorithm allowed us to automate the preprocessing of MEG data from the Human Connectome Project (HCP) going up to the computation of the evoked responses. The automated nature of our method minimizes the burden of human inspection, hence supporting scalability and reliability demanded by data analysis in modern neuroscience.},
author = {Jas, Mainak and Engemann, Denis A and Bekhti, Yousra and Raimondo, Federico and Gramfort, Alexandre},
doi = {10.1016/j.neuroimage.2017.06.030},
issn = {1095-9572 (Electronic)},
journal = {NeuroImage},
keywords = {Algorithms,Artifacts,Brain,Brain Mapping,Electroencephalography,Humans,Magnetoencephalography,Models, Neurological,Signal Processing, Computer-Assisted,methods,physiology},
language = {eng},
month = oct,
pages = {417--429},
pmid = {28645840},
title = {{Autoreject: Automated artifact rejection for MEG and EEG data}},
volume = {159},
year = {2017}
}
@article{10.3389/fninf.2015.00016,
abstract = {The technology to collect brain imaging and physiological measures has become portable and ubiquitous, opening the possibility of large-scale analysis of real-world human imaging. By its nature, such data is large and complex, making automated processing essential. This paper shows how lack of attention to the very early stages of an EEG preprocessing pipeline can reduce the signal-to-noise ratio and introduce unwanted artifacts into the data, particularly for computations done in single precision. We demonstrate that ordinary average referencing improves the signal-to-noise ratio, but that noisy channels can contaminate the results. We also show that identification of noisy channels depends on the reference and examine the complex interaction of filtering, noisy channel identification, and referencing. We introduce a multi-stage robust referencing scheme to deal with the noisy channel-reference interaction. We propose a standardized early-stage EEG processing pipeline (PREP) and discuss the application of the pipeline to more than 600 EEG datasets. The pipeline includes an automatically generated report for each dataset processed. Users can download the PREP pipeline as a freely available MATLAB library from http://eegstudy.org/prepcode.},
author = {Bigdely-Shamlo, Nima and Mullen, Tim and Kothe, Christian and Su, Kyung-Min and Robbins, Kay A},
doi = {10.3389/fninf.2015.00016},
issn = {1662-5196},
journal = {Frontiers in Neuroinformatics},
pages = {16},
title = {{The PREP pipeline: standardized preprocessing for large-scale EEG analysis}},
url = {https://www.frontiersin.org/article/10.3389/fninf.2015.00016},
volume = {9},
year = {2015}
}
@article{10.1145/358669.358692,
author = {Fischler, Martin A. and Bolles, Robert C.},
title = {Random Sample Consensus: A Paradigm for Model Fitting with Applications to Image Analysis and Automated Cartography},
year = {1981},
issue_date = {June 1981},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {24},
number = {6},
issn = {0001-0782},
url = {https://doi.org/10.1145/358669.358692},
doi = {10.1145/358669.358692},
abstract = {A new paradigm, Random Sample Consensus (RANSAC), for fitting a model to experimental data is introduced. RANSAC is capable of interpreting/smoothing data containing a significant percentage of gross errors, and is thus ideally suited for applications in automated image analysis where interpretation is based on the data provided by error-prone feature detectors. A major portion of this paper describes the application of RANSAC to the Location Determination Problem (LDP): Given an image depicting a set of landmarks with known locations, determine that point in space from which the image was obtained. In response to a RANSAC requirement, new results are derived on the minimum number of landmarks needed to obtain a solution, and algorithms are presented for computing these minimum-landmark solutions in closed form. These results provide the basis for an automatic system that can solve the LDP under difficult viewing conditions.},
journal = {Communications of the ACM},
month = jun,
pages = {381--395},
numpages = {15},
keywords = {scene analysis, model fitting, automated cartography, image matching, camera calibration, location determination}
}
@article{Bell1995,
abstract = {We derive a new self-organizing learning algorithm that maximizes the information transferred in a network of nonlinear units. The algorithm does not assume any knowledge of the input distributions, and is defined here for the zero-noise limit. Under these conditions, information maximization has extra properties not found in the linear case (Linsker 1989). The nonlinearities in the transfer function are able to pick up higher-order moments of the input distributions and perform something akin to true redundancy reduction between units in the output representation. This enables the network to separate statistically independent components in the inputs: a higher-order generalization of principal components analysis. We apply the network to the source separation (or cocktail party) problem, successfully separating unknown mixtures of up to 10 speakers. We also show that a variant on the network architecture is able to perform blind deconvolution (cancellation of unknown echoes and reverberation in a speech signal). Finally, we derive dependencies of information transfer on time delays. We suggest that information maximization provides a unifying framework for problems in "blind" signal processing.},
author = {Bell, A J and Sejnowski, T J},
doi = {10.1162/neco.1995.7.6.1129},
issn = {0899-7667 (Print)},
journal = {Neural Computation},
keywords = {Algorithms,Humans,Learning,Models, Statistical,Neural Networks, Computer,Neurons,Probability,Problem Solving,Speech},
language = {eng},
month = nov,
number = {6},
pages = {1129--1159},
pmid = {7584893},
title = {{An information-maximization approach to blind separation and blind deconvolution}},
volume = {7},
year = {1995}
}
@inbook{doi:10.1142/9789812818041_0008,
abstract = {This text on some information theoretic approaches to neural coding is based on lectures given at the summer school "Neural Information Processing", Carg{\`{e}}se, July 1997, and on a talk given at the meeting "Towards a theoretical brain", Les Treilles, April 1995.},
author = {Nadal, J.-P. and Parga, N.},
booktitle = {Neuronal Information Processing},
doi = {10.1142/9789812818041_0008},
pages = {164--171},
publisher = {World Scientific},
title = {{Sensory coding: information maximization and redundancy reduction}},
url = {https://www.worldscientific.com/doi/abs/10.1142/9789812818041_0008}
}