From 490e102bca65c9f8cd43c0a3c042443774655d87 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Mon, 10 Jan 2022 19:53:10 +0100 Subject: [PATCH] Typos found by codespell Add a codespell section in setup.cfg in the process. --- doc/source/devel/index.rst | 12 ++--- doc/source/examples/apod_viewer_1win.rst | 2 +- doc/source/examples/apod_viewer_2win.rst | 2 +- doc/source/examples/integrate_1d.rst | 4 +- doc/source/examples/integrate_2d.rst | 2 +- doc/source/reference/bruker.rst | 6 +-- doc/source/reference/convert.rst | 2 +- doc/source/reference/nmrml.rst | 4 +- doc/source/reference/rnmrtk.rst | 4 +- doc/source/reference/simpson.rst | 4 +- doc/source/reference/tecmag.rst | 4 +- doc/source/reference/varian.rst | 4 +- doc/source/tutorial.rst | 2 +- .../bruker_processed_1d/dig_filter_remove.py | 2 +- examples/coadd/coadd_1d_pipe/coadd_1d.py | 2 +- examples/coadd/coadd_2d_pipe/coadd_2d.py | 2 +- .../coadd_pseudo3d_pipe/coadd_pseudo3d.py | 2 +- .../agilent2pipe_2d/agilent2pipe_2d.py | 2 +- .../agilent2pipe_2d_tppi.py | 2 +- .../agilent2pipe_3d/agilent2pipe_3d.py | 2 +- .../convert/bruker2pipe_1d/bruker2pipe_1d.py | 2 +- .../convert/bruker2pipe_2d/bruker2pipe_2d.py | 2 +- .../convert/bruker2pipe_3d/bruker2pipe_3d.py | 2 +- .../convert/pipe2sparky_2d/pipe2sparky_2d.py | 2 +- .../convert/pipe2sparky_3d/pipe2sparky_3d.py | 2 +- .../t1_measurements/extract_trajs.py | 2 +- .../fitting_data/t1_measurements/xy_s3e.com | 2 +- .../integration/integrate_1d/integrate_1d.py | 4 +- .../jbnmr_examples/make_jbnmr_zip_files.com | 4 +- .../jbnmr_examples/s11_strip_plots/README.rst | 2 +- .../s12-s15_relaxation_analysis/README.rst | 2 +- .../s12-s15_relaxation_analysis/fit_exp.py | 2 +- .../s5_seperate_interleaved/README.rst | 6 +-- .../s6_seperate_inner_phase/README.rst | 6 +-- .../s7-s9_s3e_processing/README.rst | 4 +- examples/processing/process_pipe_3d.py | 4 +- .../sample_applications/apod_viewer_1win.py | 4 +- .../sample_applications/apod_viewer_2win.py | 4 +- nmrglue/analysis/analysisbase.py | 10 ++-- nmrglue/analysis/leastsqbound.py | 8 +-- nmrglue/analysis/linesh.py | 18 +++---- nmrglue/analysis/lineshapes1d.py | 18 +++---- nmrglue/analysis/peakpick.py | 16 +++--- nmrglue/analysis/segmentation.py | 30 +++++------ nmrglue/fileio/bruker.py | 28 +++++----- nmrglue/fileio/fileiobase.py | 6 +-- nmrglue/fileio/glue.py | 10 ++-- nmrglue/fileio/jcampdx.py | 2 +- nmrglue/fileio/nmrml.py | 2 +- nmrglue/fileio/pipe.py | 32 +++++------ nmrglue/fileio/rnmrtk.py | 8 +-- nmrglue/fileio/simpson.py | 6 +-- nmrglue/fileio/sparky.py | 44 +++++++-------- nmrglue/fileio/spinsolve.py | 2 +- nmrglue/fileio/table.py | 4 +- nmrglue/fileio/tests/test_fileiobase.py | 2 +- nmrglue/fileio/varian.py | 30 +++++------ nmrglue/process/pipe_proc.py | 44 +++++++-------- nmrglue/process/proc_autophase.py | 2 +- nmrglue/process/proc_base.py | 54 +++++++++---------- nmrglue/process/proc_bl.py | 10 ++-- nmrglue/process/proc_lp.py | 38 ++++++------- nmrglue/util/xcpy.py | 4 +- setup.cfg | 3 ++ tests/pipe_proc_tests/tm.py | 2 +- tests/test_pipe_proc.py | 6 +-- 66 files changed, 282 insertions(+), 279 deletions(-) diff --git a/doc/source/devel/index.rst b/doc/source/devel/index.rst index 5c41982e..c0e559c7 100644 --- a/doc/source/devel/index.rst +++ b/doc/source/devel/index.rst @@ -1,8 +1,8 @@ .. 
_development-guide:

-==================
-Developement Guide
-==================
+=================
+Development Guide
+=================
 
 This guide provides instructions for setting up an environment for developing
 nmrglue and an overview of the project layout and contribution process.
@@ -96,7 +96,7 @@ Unpack this archive in the ``examples`` directory.
 Run the ``make_links.sh``
 shell script to make symbolic links to the test data which reused in a number
 of example.  On operating systems which do not support symbolic links
 (Windows), the data in the test data directory will need to be
-copied by hand into the appropiate locations.
+copied by hand into the appropriate locations.
 
 
 Project Layout
@@ -132,7 +132,7 @@ Suggestions
 When working with the nmrglue source code please consider the following when
 preparing patches.
 
-* Coding Style : The nmrglue source code trys to follow the
+* Coding Style : The nmrglue source code tries to follow the
   `PEP8 `_ style guide.  Consider using a
   tool, such as `pep8 `__ or
   `pylint `_ to check your Python code against
@@ -181,7 +181,7 @@ In addition, the location of the the test data sets must be specified in the
 In order to run all nmrglue unit tests, the tests data sets must be
 downloaded, unpacked, and the all conversions scripts contained in the archive
 must be run.  Many of these scripts require additional NMR software
-(NMRPipe, etc), see the ``README`` file in the test data achive for additional
+(NMRPipe, etc), see the ``README`` file in the test data archive for additional
 details.  A subset of the full test suite can be run without installing any
 additional software.
diff --git a/doc/source/examples/apod_viewer_1win.rst b/doc/source/examples/apod_viewer_1win.rst
index 39e1a9b8..c88a45d5 100644
--- a/doc/source/examples/apod_viewer_1win.rst
+++ b/doc/source/examples/apod_viewer_1win.rst
@@ -12,7 +12,7 @@ In this application users can examine graphically the apodization windows
 produced by the various window functions supported by NMRPipe.  In this
 example the canvas in which the apodization windows are drawn and the location
 to input the apodization parameter are contained in the same window.  The
-:ref:`apod_viewer_2win` example has the canvas and input area in seperate
+:ref:`apod_viewer_2win` example has the canvas and input area in separate
 windows.
 
 [:download:`source code <../../../examples/sample_applications/apod_viewer_1win.py>`]
diff --git a/doc/source/examples/apod_viewer_2win.rst b/doc/source/examples/apod_viewer_2win.rst
index a85707d8..36de90db 100644
--- a/doc/source/examples/apod_viewer_2win.rst
+++ b/doc/source/examples/apod_viewer_2win.rst
@@ -11,7 +11,7 @@ additional python modules like
 In this application users can examine graphically the apodization windows
 produced by the various window functions supported by NMRPipe.  In this
 example the canvas in which the apodization windows are drawn and the location
-to input the apodization parameter are contained in two seperate same window.  The
+to input the apodization parameter are contained in two separate windows.  The
 :ref:`apod_viewer_1win` example has the canvas and input area in a single
 window.
diff --git a/doc/source/examples/integrate_1d.rst b/doc/source/examples/integrate_1d.rst
index f6ab9f3a..dfedce22 100644
--- a/doc/source/examples/integrate_1d.rst
+++ b/doc/source/examples/integrate_1d.rst
@@ -5,8 +5,8 @@ integration example: integrate_1d
 This example shows how to use nmrglue to integrate a 1D NMRPipe spectra.
The script reads in ppm peak limits from ``limits.in`` and takes a simple
-summation integral of each peak using the spectra contained in ``1d_data.ft``. The integration values are writting to ``area.out`` and a plot is make showing
-the integration limits and values overlayed on the spectra to ``plot.png``.
+summation integral of each peak using the spectra contained in ``1d_data.ft``. The integration values are written to ``area.out`` and a plot is made showing
+the integration limits and values overlaid on the spectra to ``plot.png``.
 
 The data used in this example is available for
 `download. `_
diff --git a/doc/source/examples/integrate_2d.rst b/doc/source/examples/integrate_2d.rst
index 292d1703..a2e2cd7b 100644
--- a/doc/source/examples/integrate_2d.rst
+++ b/doc/source/examples/integrate_2d.rst
@@ -6,7 +6,7 @@ integration example: integrate_2d
 This example shows how to use nmrglue to integrate a 2D NMRPipe spectra.
 The script reads in point limits from ``limits.in`` and takes a simple
 summation integral of all points in each box described.  The integrated
-volumes are writting to ``volumes.out``.  For a method to graphically examine
+volumes are written to ``volumes.out``.  For a method to graphically examine
 these limits see :ref:`plot_2d_boxes`.  Similarly to check the peak
 assignments see :ref:`plot_2d_assignments`.
diff --git a/doc/source/reference/bruker.rst b/doc/source/reference/bruker.rst
index fae9bdd6..15167afd 100644
--- a/doc/source/reference/bruker.rst
+++ b/doc/source/reference/bruker.rst
@@ -11,7 +11,7 @@ User Information
 User Functions
 ^^^^^^^^^^^^^^
 
-These are functions which are targetted for users of nmrglue.
+These are functions which are targeted for users of nmrglue.
 
 .. autosummary::
     :toctree: generated/
@@ -38,8 +38,8 @@ These are functions which are targetted for users of nmrglue.
 
 
-Developer Infomation
---------------------
+Developer Information
+---------------------
 
 .. include:: ../../../nmrglue/fileio/bruker.py
     :start-line: 11
diff --git a/doc/source/reference/convert.rst b/doc/source/reference/convert.rst
index 221d58df..7bef910e 100644
--- a/doc/source/reference/convert.rst
+++ b/doc/source/reference/convert.rst
@@ -11,7 +11,7 @@ User Information
 User Classes
 ^^^^^^^^^^^^
 
-This class is targetted for users of nmrglue.
+This class is targeted for users of nmrglue.
 
 ..
autoclass:: converter :members: diff --git a/doc/source/reference/nmrml.rst b/doc/source/reference/nmrml.rst index 59868f13..d911d1a6 100644 --- a/doc/source/reference/nmrml.rst +++ b/doc/source/reference/nmrml.rst @@ -18,8 +18,8 @@ User Functions read -Developer Infomation --------------------- +Developer Information +--------------------- Developer Functions ^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/reference/rnmrtk.rst b/doc/source/reference/rnmrtk.rst index 018b06c1..b015f98c 100644 --- a/doc/source/reference/rnmrtk.rst +++ b/doc/source/reference/rnmrtk.rst @@ -27,8 +27,8 @@ User Functions create_dic -Developer Infomation --------------------- +Developer Information +--------------------- Developer Functions ^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/reference/simpson.rst b/doc/source/reference/simpson.rst index 23af90bc..c0e8c8b7 100644 --- a/doc/source/reference/simpson.rst +++ b/doc/source/reference/simpson.rst @@ -24,8 +24,8 @@ User Functions read_raw_bin_2d read_binary -Developer Infomation --------------------- +Developer Information +--------------------- Developer Functions ^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/reference/tecmag.rst b/doc/source/reference/tecmag.rst index 3c4ecdba..0db79a01 100644 --- a/doc/source/reference/tecmag.rst +++ b/doc/source/reference/tecmag.rst @@ -18,8 +18,8 @@ User Functions guess_udic -Developer Infomation --------------------- +Developer Information +--------------------- .. include:: ../../../nmrglue/fileio/tecmag.py :start-line: 4 diff --git a/doc/source/reference/varian.rst b/doc/source/reference/varian.rst index c361e771..8a069e9e 100644 --- a/doc/source/reference/varian.rst +++ b/doc/source/reference/varian.rst @@ -32,8 +32,8 @@ User Functions create_dic -Developer Infomation --------------------- +Developer Information +--------------------- .. include:: ../../../nmrglue/fileio/varian.py :start-line: 8 diff --git a/doc/source/tutorial.rst b/doc/source/tutorial.rst index 2fd9b572..324015ca 100644 --- a/doc/source/tutorial.rst +++ b/doc/source/tutorial.rst @@ -313,7 +313,7 @@ reference your data in more common NMR units nmrglue provides the >>> uc1 = ng.pipe.make_uc(dic,data,dim=1) We now have unit conversion objects for both axes in the 2D spectrum. 
We can -use these objects to determined the nearest point for a given unit: +use these objects to determine the nearest point for a given unit: >>> uc0("100.0 ppm") 1397 diff --git a/examples/bruker_processed_1d/dig_filter_remove.py b/examples/bruker_processed_1d/dig_filter_remove.py index dd225b53..18b3294a 100644 --- a/examples/bruker_processed_1d/dig_filter_remove.py +++ b/examples/bruker_processed_1d/dig_filter_remove.py @@ -24,7 +24,7 @@ pipe_precorr_frq = ng.pipe.read('pre.ft')[1][::-1] pipe_precorr_frq = ng.proc_autophase.autops(pipe_precorr_frq, 'peak_minima') -#---data processed using NMRPipe (post-procesing) +#---data processed using NMRPipe (post-processing) pipe_postcorr_frq = ng.pipe.read('post.ft')[1][::-1] pipe_postcorr_frq = ng.proc_autophase.autops(pipe_postcorr_frq, 'acme') diff --git a/examples/coadd/coadd_1d_pipe/coadd_1d.py b/examples/coadd/coadd_1d_pipe/coadd_1d.py index ff7c271d..27a35a8f 100755 --- a/examples/coadd/coadd_1d_pipe/coadd_1d.py +++ b/examples/coadd/coadd_1d_pipe/coadd_1d.py @@ -8,7 +8,7 @@ flist = glob.glob("test*.fid") flist.sort() -# initilize the new data +# initialize the new data dic, data = ng.pipe.read(flist[0]) coadd_data = np.zeros_like(data) coadd_dic = dict(dic) diff --git a/examples/coadd/coadd_2d_pipe/coadd_2d.py b/examples/coadd/coadd_2d_pipe/coadd_2d.py index ff7c271d..27a35a8f 100755 --- a/examples/coadd/coadd_2d_pipe/coadd_2d.py +++ b/examples/coadd/coadd_2d_pipe/coadd_2d.py @@ -8,7 +8,7 @@ flist = glob.glob("test*.fid") flist.sort() -# initilize the new data +# initialize the new data dic, data = ng.pipe.read(flist[0]) coadd_data = np.zeros_like(data) coadd_dic = dict(dic) diff --git a/examples/coadd/coadd_pseudo3d_pipe/coadd_pseudo3d.py b/examples/coadd/coadd_pseudo3d_pipe/coadd_pseudo3d.py index d2fcbcce..c1a295fa 100755 --- a/examples/coadd/coadd_pseudo3d_pipe/coadd_pseudo3d.py +++ b/examples/coadd/coadd_pseudo3d_pipe/coadd_pseudo3d.py @@ -16,7 +16,7 @@ # loop over the files for base_fname in flist: - # initilize the new data + # initialize the new data dic, data = ng.pipe.read(base_fname) coadd_data = np.zeros_like(data) coadd_dic = dict(dic) diff --git a/examples/convert/agilent2pipe_2d/agilent2pipe_2d.py b/examples/convert/agilent2pipe_2d/agilent2pipe_2d.py index ee9525dc..38c25bf8 100755 --- a/examples/convert/agilent2pipe_2d/agilent2pipe_2d.py +++ b/examples/convert/agilent2pipe_2d/agilent2pipe_2d.py @@ -17,7 +17,7 @@ udic[1]['car'] = 55.0 * 125.691; udic[0]['car'] = 120.0 * 50.648 udic[1]['label'] = '13C' ; udic[0]['label'] = '15N' -# create the converter object and initilize with Agilent data +# create the converter object and initialize with Agilent data C = ng.convert.converter() C.from_varian(dic, data, udic) diff --git a/examples/convert/agilent2pipe_2d_tppi/agilent2pipe_2d_tppi.py b/examples/convert/agilent2pipe_2d_tppi/agilent2pipe_2d_tppi.py index 9af24b5b..cae65463 100755 --- a/examples/convert/agilent2pipe_2d_tppi/agilent2pipe_2d_tppi.py +++ b/examples/convert/agilent2pipe_2d_tppi/agilent2pipe_2d_tppi.py @@ -17,7 +17,7 @@ u[1]['car'] = 101.274 * 125.681 ; u[0]['car'] = 101.274 * 125.681 u[1]['label'] = 'C13x' ; u[0]['label'] = 'C13y' -# create the converter object and initilize with Agilent data +# create the converter object and initialize with Agilent data C = ng.convert.converter() C.from_varian(dic, data, u) diff --git a/examples/convert/agilent2pipe_3d/agilent2pipe_3d.py b/examples/convert/agilent2pipe_3d/agilent2pipe_3d.py index 6014188f..928dd642 100755 --- a/examples/convert/agilent2pipe_3d/agilent2pipe_3d.py 
+++ b/examples/convert/agilent2pipe_3d/agilent2pipe_3d.py
@@ -37,7 +37,7 @@
 udic[0]['label'] = 'CA'
 
 
-# create the converter object and initilize with Agilent data
+# create the converter object and initialize with Agilent data
 C = ng.convert.converter()
 C.from_varian(dic, data, udic)
 
diff --git a/examples/convert/bruker2pipe_1d/bruker2pipe_1d.py b/examples/convert/bruker2pipe_1d/bruker2pipe_1d.py
index b5044032..e1c90e3a 100755
--- a/examples/convert/bruker2pipe_1d/bruker2pipe_1d.py
+++ b/examples/convert/bruker2pipe_1d/bruker2pipe_1d.py
@@ -15,7 +15,7 @@
 udic[0]['car'] = 4.773 * 600.133
 udic[0]['label'] = '1H'
 
-# create the converter object and initilize with Bruker data
+# create the converter object and initialize with Bruker data
 C = ng.convert.converter()
 C.from_bruker(dic, data, udic)
 
diff --git a/examples/convert/bruker2pipe_2d/bruker2pipe_2d.py b/examples/convert/bruker2pipe_2d/bruker2pipe_2d.py
index 878c524e..8d51a9d7 100755
--- a/examples/convert/bruker2pipe_2d/bruker2pipe_2d.py
+++ b/examples/convert/bruker2pipe_2d/bruker2pipe_2d.py
@@ -17,7 +17,7 @@
 u[1]['car'] = 4.773 * 800.134 ; u[0]['car'] = 58.742 * 201.204
 u[1]['label'] = '1H' ; u[0]['label'] = '13C'
 
-# create the converter object and initilize with Bruker data
+# create the converter object and initialize with Bruker data
 C = ng.convert.converter()
 C.from_bruker(dic, data, u)
 
diff --git a/examples/convert/bruker2pipe_3d/bruker2pipe_3d.py b/examples/convert/bruker2pipe_3d/bruker2pipe_3d.py
index 60b20d6b..0be99715 100755
--- a/examples/convert/bruker2pipe_3d/bruker2pipe_3d.py
+++ b/examples/convert/bruker2pipe_3d/bruker2pipe_3d.py
@@ -37,7 +37,7 @@
 udic[0]['label'] = '13C'
 
 
-# create the converter object and initilize with Bruker data
+# create the converter object and initialize with Bruker data
 C = ng.convert.converter()
 C.from_bruker(dic, data, udic)
 
diff --git a/examples/convert/pipe2sparky_2d/pipe2sparky_2d.py b/examples/convert/pipe2sparky_2d/pipe2sparky_2d.py
index c6f5d7bb..3486cb1a 100755
--- a/examples/convert/pipe2sparky_2d/pipe2sparky_2d.py
+++ b/examples/convert/pipe2sparky_2d/pipe2sparky_2d.py
@@ -8,7 +8,7 @@
 # Set the spectral parameters
 udic = ng.pipe.guess_udic(dic, data)
 
-# create the converter object and initilize with NMRPipe data
+# create the converter object and initialize with NMRPipe data
 C = ng.convert.converter()
 C.from_pipe(dic, data, udic)
 
diff --git a/examples/convert/pipe2sparky_3d/pipe2sparky_3d.py b/examples/convert/pipe2sparky_3d/pipe2sparky_3d.py
index 4bba3cef..0d5f3bff 100755
--- a/examples/convert/pipe2sparky_3d/pipe2sparky_3d.py
+++ b/examples/convert/pipe2sparky_3d/pipe2sparky_3d.py
@@ -9,7 +9,7 @@
 # Set the spectral parameters
 udic = ng.pipe.guess_udic(dic, data)
 
-# create the converter object and initilize with NMRPipe data
+# create the converter object and initialize with NMRPipe data
 C = ng.convert.converter()
 C.from_pipe(dic, data, udic)
 
diff --git a/examples/fitting_data/t1_measurements/extract_trajs.py b/examples/fitting_data/t1_measurements/extract_trajs.py
index b92202c6..f96628b1 100755
--- a/examples/fitting_data/t1_measurements/extract_trajs.py
+++ b/examples/fitting_data/t1_measurements/extract_trajs.py
@@ -1,5 +1,5 @@
 #! /usr/bin/env python
-# Scipt to extract trajectories from a series a 2D spectrum.
+# Script to extract trajectories from a series of 2D spectra.
 import nmrglue as ng
 import numpy as np
 
diff --git a/examples/fitting_data/t1_measurements/xy_s3e.com b/examples/fitting_data/t1_measurements/xy_s3e.com
index eb917bec..4828bd52 100755
--- a/examples/fitting_data/t1_measurements/xy_s3e.com
+++ b/examples/fitting_data/t1_measurements/xy_s3e.com
@@ -5,7 +5,7 @@
 # By: Jonathan Helmus (helmus.3@osu.edu)
 
 # data must be converted to nmrpipe data with twice the number of points
-# in the indirect dimention using var2pipe and the A/B S3E block the intermost
+# in the indirect dimension using var2pipe and the A/B S3E block the innermost
 # array
 
 # var2pipe sign alternates the Y vectors for States so we have:
diff --git a/examples/integration/integrate_1d/integrate_1d.py b/examples/integration/integrate_1d/integrate_1d.py
index 53428f01..4645f623 100755
--- a/examples/integration/integrate_1d/integrate_1d.py
+++ b/examples/integration/integrate_1d/integrate_1d.py
@@ -1,5 +1,5 @@
 #! /usr/bin/env python
-# Example scipt to show integration of a 1D spectrum
+# Example script to show integration of a 1D spectrum
 
 import nmrglue as ng
 import numpy as np
@@ -12,7 +12,7 @@
 # read in the integration limits
 peak_list = np.recfromtxt("limits.in")
 
-# determind the ppm scale
+# determine the ppm scale
 uc = ng.pipe.make_uc(dic, data)
 ppm_scale = uc.ppm_scale()
 
diff --git a/examples/jbnmr_examples/make_jbnmr_zip_files.com b/examples/jbnmr_examples/make_jbnmr_zip_files.com
index 0a1735e1..b28e7779 100755
--- a/examples/jbnmr_examples/make_jbnmr_zip_files.com
+++ b/examples/jbnmr_examples/make_jbnmr_zip_files.com
@@ -32,7 +32,7 @@ s4_2d_plotting/test.ft2
 
 mv jbnmr_s4_2d_plotting.zip ./zip_files/.
 
-# Listing S5 seperate interleaved
+# Listing S5 separate interleaved
 zip jbnmr_s5_seperate_interleaved.zip \
 s5_seperate_interleaved/seperate.py \
 s5_seperate_interleaved/README.rst \
@@ -44,7 +44,7 @@ s5_seperate_interleaved/arrayed_data.fid/fid
 
 mv jbnmr_s5_seperate_interleaved.zip ./zip_files/.
 
-# Listing S6 seperate inner_phase
+# Listing S6 separate inner_phase
 zip jbnmr_s6_seperate_inner_phase.zip \
 s6_seperate_inner_phase/seperate.py \
 s6_seperate_inner_phase/README.rst \
diff --git a/examples/jbnmr_examples/s11_strip_plots/README.rst b/examples/jbnmr_examples/s11_strip_plots/README.rst
index 7894634a..ddb16998 100644
--- a/examples/jbnmr_examples/s11_strip_plots/README.rst
+++ b/examples/jbnmr_examples/s11_strip_plots/README.rst
@@ -11,7 +11,7 @@ this example a three 3D solid state NMR spectra are visualized as strip plots.
 Instructions
 ------------
 
-Process each of the three 3D spectra by decending into the appopiate directory
+Process each of the three 3D spectra by descending into the appropriate directory
 and executing the `fid.com` followed by the `xy.com` NMRPipe scripts.
 
diff --git a/examples/jbnmr_examples/s12-s15_relaxation_analysis/README.rst b/examples/jbnmr_examples/s12-s15_relaxation_analysis/README.rst
index 39ba8f2c..7b4454df 100644
--- a/examples/jbnmr_examples/s12-s15_relaxation_analysis/README.rst
+++ b/examples/jbnmr_examples/s12-s15_relaxation_analysis/README.rst
@@ -6,7 +6,7 @@ Introduction
 
 This example is taken from Listing S12 - S15 in the 2013 JBNMR nmrglue paper.
 In this example a series of 3D NMRPipe files containing relaxation trajectories
-for a solid state NMR experment and analyzed.
+for a solid state NMR experiment are analyzed.
diff --git a/examples/jbnmr_examples/s12-s15_relaxation_analysis/fit_exp.py b/examples/jbnmr_examples/s12-s15_relaxation_analysis/fit_exp.py index 8f2b84cc..31b6598c 100644 --- a/examples/jbnmr_examples/s12-s15_relaxation_analysis/fit_exp.py +++ b/examples/jbnmr_examples/s12-s15_relaxation_analysis/fit_exp.py @@ -28,7 +28,7 @@ def residuals(p, y, x): peak = filename[:3] print "Fitting Peak:", peak - # fit the trajectory using contrainted least squares optimization + # fit the trajectory using constrained least squares optimization trajectory = np.loadtxt(filename) x, ier = leastsqbound(residuals, x0, bounds=bounds, args=(trajectory, relaxation_times)) diff --git a/examples/jbnmr_examples/s5_seperate_interleaved/README.rst b/examples/jbnmr_examples/s5_seperate_interleaved/README.rst index 8db2020d..1e38b136 100644 --- a/examples/jbnmr_examples/s5_seperate_interleaved/README.rst +++ b/examples/jbnmr_examples/s5_seperate_interleaved/README.rst @@ -1,16 +1,16 @@ -Seperated interleaved example +Separated interleaved example ============================= Introduction ------------ This example is taken from Listing S5 in the 2013 JBNMR nmrglue paper. In -this example a pseudo-3D NMRPipe data set is seperated into 2D data sets using +this example a pseudo-3D NMRPipe data set is separated into 2D data sets using the script `seperate.py`. Instructions ------------ -Execute `python seperate.py` to seperate the pseudo-3D data set into 2D data +Execute `python seperate.py` to separate the pseudo-3D data set into 2D data sets. Six directories with names `techo_XXXX.fid` will be created. diff --git a/examples/jbnmr_examples/s6_seperate_inner_phase/README.rst b/examples/jbnmr_examples/s6_seperate_inner_phase/README.rst index 7b27b32a..f516dd8d 100644 --- a/examples/jbnmr_examples/s6_seperate_inner_phase/README.rst +++ b/examples/jbnmr_examples/s6_seperate_inner_phase/README.rst @@ -1,16 +1,16 @@ -Seperated inner phase example +Separated inner phase example ============================= Introduction ------------ This example is taken from Listing S6 in the 2013 JBNMR nmrglue paper. In -this example a pseudo-3D NMRPipe data set is seperated into 2D data sets using +this example a pseudo-3D NMRPipe data set is separated into 2D data sets using the script `seperate.py` where there is an innermost quadrature phase loop. Instructions ------------ -Execute `python seperate.py` to seperate the pseudo-3D data set into 2D data +Execute `python seperate.py` to separate the pseudo-3D data set into 2D data sets. Ten directories with names `nredor_XX.fid` will be created. diff --git a/examples/jbnmr_examples/s7-s9_s3e_processing/README.rst b/examples/jbnmr_examples/s7-s9_s3e_processing/README.rst index 6b984d66..c3471c31 100644 --- a/examples/jbnmr_examples/s7-s9_s3e_processing/README.rst +++ b/examples/jbnmr_examples/s7-s9_s3e_processing/README.rst @@ -6,7 +6,7 @@ Introduction This example is taken from Listing S7, S8 and S9 in the 2013 JBNMR nmrglue paper. In this example a 2D Agilent/Varian data set collect using a S3E filter -is seperated (`seperate_s3e.py`), converted to NMRPipe format (`Sparky file +is separated (`seperate_s3e.py`), converted to NMRPipe format (`Sparky file (`data.ucsf`) is converted to a NMRPipe file ('convert.py') and finally processed (`xy_s3e.py`). @@ -14,7 +14,7 @@ processed (`xy_s3e.py`). 
Instructions ------------ -Execute `python seperate_s3e.py` to seperate the S3E sum and difference +Execute `python seperate_s3e.py` to separate the S3E sum and difference spectra from data set in the Agilent/Varian `fid` file. This creates the files fid_dif and fid_sum. diff --git a/examples/processing/process_pipe_3d.py b/examples/processing/process_pipe_3d.py index 70304a14..b1ac4117 100755 --- a/examples/processing/process_pipe_3d.py +++ b/examples/processing/process_pipe_3d.py @@ -8,13 +8,13 @@ print "Processing XY planes..." for i, (dic, plane) in enumerate(xiter): - # process the direct dimention (x) + # process the direct dimension (x) dic, plane = ng.pipe_proc.zf(dic, plane, auto=True) dic, plane = ng.pipe_proc.ft(dic, plane, auto=True) dic, plane = ng.pipe_proc.ps(dic, plane, p0=0.0, p1=0.0) dic, plane = ng.pipe_proc.di(dic, plane) - # process the first indirect (y) dimention + # process the first indirect (y) dimension dic, plane = ng.pipe_proc.tp(dic, plane) dic, plane = ng.pipe_proc.zf(dic, plane, auto=True) dic, plane = ng.pipe_proc.ft(dic, plane, auto=True) diff --git a/examples/sample_applications/apod_viewer_1win.py b/examples/sample_applications/apod_viewer_1win.py index 50f56dd8..de22b4d9 100755 --- a/examples/sample_applications/apod_viewer_1win.py +++ b/examples/sample_applications/apod_viewer_1win.py @@ -119,11 +119,11 @@ def OnLimitCheck(self, event): self.size_2.SetValue(str(points)) def ApodChoose(self, event): - """ Set the choosen apodization type """ + """ Set the chosen apodization type """ self.InitApod(apod_list[self.qName2.GetCurrentSelection()]) def InitApod(self, qName): - """ Set the default parameter for a choosen apodization window """ + """ Set the default parameter for a chosen apodization window """ if qName == "SP": self.q1_1.Enable(True) diff --git a/examples/sample_applications/apod_viewer_2win.py b/examples/sample_applications/apod_viewer_2win.py index be537245..d65c949d 100755 --- a/examples/sample_applications/apod_viewer_2win.py +++ b/examples/sample_applications/apod_viewer_2win.py @@ -123,11 +123,11 @@ def OnLimitCheck(self, event): self.size_2.SetValue(str(points)) def ApodChoose(self, event): - """ Set the choosen apodization type """ + """ Set the chosen apodization type """ self.InitApod(apod_list[self.qName2.GetCurrentSelection()]) def InitApod(self, qName): - """ Set the default parameter for a choosen apodization window """ + """ Set the default parameter for a chosen apodization window """ if qName == "SP": self.q1_1.Enable(True) diff --git a/nmrglue/analysis/analysisbase.py b/nmrglue/analysis/analysisbase.py index c8dd5f36..990bf766 100644 --- a/nmrglue/analysis/analysisbase.py +++ b/nmrglue/analysis/analysisbase.py @@ -46,7 +46,7 @@ def neighbors(pt, shape, structure): def valid_pt(pt, shape): """ - Determind if a point (indices) is valid for a given shaped + Determine if a point (indices) is valid for a given shaped """ for i, j in zip(pt, shape): if i < 0: # index is not negative @@ -124,7 +124,7 @@ def slice2limits(slices): See Also -------- - limits2slice : Find a list of slices given minumum and maximum limits. + limits2slice : Find a list of slices given minimum and maximum limits. """ mins = [s.start for s in slices] maxs = [s.stop - 1 for s in slices] @@ -249,7 +249,7 @@ def __iter__(self): class ndwindow_index(object): """ - An N-dimensional interator object which returns the index of the window + An N-dimensional iterator object which returns the index of the window center and a :py:class:`ndwindow` slice array. 
See :py:class:`ndwindow` for additional documentation. @@ -291,7 +291,7 @@ def __iter__(self): class ndwindow_inside(object): """ - An N-dimentional iterator to slice arrays into uniform size windows. + An N-dimensional iterator to slice arrays into uniform size windows. Given the shape of an array and a window size, an 'ndwindow_inside' instance iterators over tuples of slices which slice an the array into @@ -354,7 +354,7 @@ def __iter__(self): class ndwindow_inside_index(object): """ - An N-dimensional interator object which returns the index of the window + An N-dimensional iterator object which returns the index of the window top-left and a :py:class:`ndwindow_inside` slice array. Similar to :py:class:`ndwindow_index` but reports top left index of diff --git a/nmrglue/analysis/leastsqbound.py b/nmrglue/analysis/leastsqbound.py index cee6eb75..708d6328 100644 --- a/nmrglue/analysis/leastsqbound.py +++ b/nmrglue/analysis/leastsqbound.py @@ -10,7 +10,7 @@ def _internal2external_grad(xi, bounds): """ - Calculate the internal (unconstrained) to external (constained) + Calculate the internal (unconstrained) to external (constrained) parameter gradiants. """ grad = empty_like(xi) @@ -198,9 +198,9 @@ def leastsqbound(func, x0, args=(), bounds=None, Dfun=None, full_output=0, min sum((ydata - f(xdata, params))**2, axis=0) params - Contraints on the parameters are enforced using an internal parameter list - with appropiate transformations such that these internal parameters can be - optimized without constraints. The transfomation between a given internal + Constraints on the parameters are enforced using an internal parameter list + with appropriate transformations such that these internal parameters can be + optimized without constraints. The transformation between a given internal parameter, p_i, and a external parameter, p_e, are as follows: With ``min`` and ``max`` bounds defined :: diff --git a/nmrglue/analysis/linesh.py b/nmrglue/analysis/linesh.py index b1233ef3..5fbb7522 100644 --- a/nmrglue/analysis/linesh.py +++ b/nmrglue/analysis/linesh.py @@ -156,7 +156,7 @@ def fit_spectrum(spectrum, lineshapes, params, amps, bounds, ampbounds, ---------- spectrum : array_like - NMR data. ndarray or emulated type, must be slicable. + NMR data. ndarray or emulated type, must be sliceable. lineshape :list List of lineshapes by label (str) or a lineshape class. See :py:func:`fit_NDregion` for details. @@ -187,7 +187,7 @@ def fit_spectrum(spectrum, lineshapes, params, amps, bounds, ampbounds, True to estimate errors for each lineshape parameter and amplitude. verb : bool, optional True to print a summary of each region fit, False (the default) - supresses all printing. + suppresses all printing. **kw : optional Additional keywords passed to the scipy.optimize.leastsq function. @@ -203,7 +203,7 @@ def fit_spectrum(spectrum, lineshapes, params, amps, bounds, ampbounds, amp_err : list, only returned when error_flag is True Estimated peak amplitude errors. iers : list - List of interger flag from scipy.optimize.leastsq indicating if the + List of integer flag from scipy.optimize.leastsq indicating if the solution was found for a given peak. 1,2,3,4 indicates that a solution was found. Other indicate an error. @@ -360,7 +360,7 @@ def fit_NDregion(region, lineshapes, params, amps, bounds=None, amp_err : list, only returned when error_flag is True Estimated peak amplitude errors. 
     iers : list
-        List of interger flag from scipy.optimize.leastsq indicating if the
+        List of integer flags from scipy.optimize.leastsq indicating if the
         solution was found for a given peak. 1,2,3,4 indicates that a solution
         was found. Other indicate an error.
 
@@ -413,7 +413,7 @@ def fit_NDregion(region, lineshapes, params, amps, bounds=None,
         else:
             ls_classes.append(l)
 
-    # determind the number of parameter in each dimension
+    # determine the number of parameters in each dimension
     dim_nparam = [c.nparam(l) for l, c in zip(shape, ls_classes)]
 
     # parse params
@@ -598,7 +598,7 @@ def sim_NDregion(shape, lineshapes, params, amps):
         else:
             ls_classes.append(l)
 
-    # determind the number of parameters in each dimension.
+    # determine the number of parameters in each dimension.
     dim_nparam = [c.nparam(l) for l, c in zip(shape, ls_classes)]
 
     # parse the params parameter
@@ -664,7 +664,7 @@ def split_list(l, N):
 
 def calc_errors(region, ls_classes, p, cov, n_peaks, wmask):
     """
-    Calcuate the parameter errors from the standard errors of the estimate.
+    Calculate the parameter errors from the standard errors of the estimate.
 
     Parameters
     ----------
@@ -737,7 +737,7 @@ def s_single_NDregion(p, shape, ls_classes):
     """
     Simulate an N-dimensional region with a single peak.
 
-    This function is called repeatly by s_NDregion to build up a full
+    This function is called repeatedly by s_NDregion to build up a full
     simulated region.
 
     Parameters
@@ -779,7 +779,7 @@ def f_NDregion(region, ls_classes, p0, p_bounds, n_peaks, wmask, **kw):
     """
     Fit an N-dimensional regions containing one or more peaks.
 
-    Region is fit using a contrained Levenberg-Marquard optmization algorithm.
+    Region is fit using a constrained Levenberg-Marquardt optimization algorithm.
 
     See :py:func:`fit_NDregion` for additional documentation.
 
     Parameters
diff --git a/nmrglue/analysis/lineshapes1d.py b/nmrglue/analysis/lineshapes1d.py
index 79ad3922..c58dfc1d 100644
--- a/nmrglue/analysis/lineshapes1d.py
+++ b/nmrglue/analysis/lineshapes1d.py
@@ -156,7 +156,7 @@ def sim_voigt_fwhm(x, x0, fwhm_g, fwhm_l):
     Parameters
     ----------
     x : ndarray
-        Array of values at which to evalutate distribution.
+        Array of values at which to evaluate distribution.
     x0 : float
         Center of the distribution.
     fwhm_g : float
@@ -196,7 +196,7 @@ def sim_voigt_sigmagamma(x, x0, sigma, gamma):
     Parameters
     ----------
     x : ndarray
-        Array of values at which to evalutate distribution.
+        Array of values at which to evaluate distribution.
     x0 : float
         Center of the distribution
     sigma : float
@@ -223,7 +223,7 @@ def sim_pvoigt_fwhm(x, x0, fwhm, eta):
     Simulate a Pseudo Voigt lineshape with unit height at the center.
 
     Simulates discrete points of the continuous Pseudo Voigt profile with unit
-    heigh at the center. Full-width at half-maximum (FWHM) of the Gaussian and
+    height at the center. Full-width at half-maximum (FWHM) of the Gaussian and
     Lorentzian distribution are used as the scale parameter as well as eta,
     the mixing factor.
 
@@ -239,7 +239,7 @@
     Parameters
     ----------
     x : ndarray
-        Array of values at which to evalutate distribution.
+        Array of values at which to evaluate distribution.
     x0 : float
         Center of the distribution.
     fwhm : float
@@ -262,11 +262,11 @@
 # 1D Lineshape classes #
 ########################
 # A lineshape class defines methods used to fit and simulate one dimensional
-# lineshapes, which can be used to build multidimensinal lineshapes.  These
+# lineshapes, which can be used to build multidimensional lineshapes.
These # classes should have the following 6 methods: # sim(self, M, p) - Using parameters in p simulate a lineshape of length M. -# nparams(self, M) - Determind the number of parameters needed for a length M +# nparams(self, M) - Determine the number of parameters needed for a length M # lineshape. # guessp(self, sig) - Estimate parameters of signal sig, these should be # parameter which might be used for initial least-squares @@ -310,7 +310,7 @@ class gauss_sigma(location_scale): scale parameter. See :py:func:`sim_gauss_sigma` for functional form and parameters. """ - name = "guassian" + name = "gaussian" def sim(self, M, p): x = np.arange(M) @@ -331,7 +331,7 @@ class gauss_fwhm(location_scale): scale parameter. See :py:func:`sim_gauss_fwhm` for functional form and parameters. """ - name = "guassian" + name = "gaussian" def sim(self, M, p): x = np.arange(M) @@ -512,7 +512,7 @@ def add_edge(self, p, limits): def remove_edge(self, p, limits): return p -# lineshape convience +# lineshape convenience gauss = gauss_fwhm lorentz = lorentz_fwhm voigt = voigt_fwhm diff --git a/nmrglue/analysis/peakpick.py b/nmrglue/analysis/peakpick.py index 704718e8..1cceec63 100644 --- a/nmrglue/analysis/peakpick.py +++ b/nmrglue/analysis/peakpick.py @@ -29,7 +29,7 @@ def pick(data, pthres, nthres=None, msep=None, algorithm='connected', Minimum peak height for negative peaks (typically a negative value). None to not detect negative peaks. msep : tuple of ints, optional - N-tuple of minimum peak seperations along each axis. Must be provided + N-tuple of minimum peak separations along each axis. Must be provided if algorithm is 'thresh' or 'thresh-fast'. algorithm : {'thres', thresh-fast', 'downward', 'connected'}, optional Peak picking algorithm to use. Default is 'connected'. @@ -255,7 +255,7 @@ def clusters(data, locations, pthres, nthres, d_struc=None, l_struc=None, locations : list List of peak locations. pthres : float - Postive peak threshold. None for no postive peaks. + Positive peak threshold. None for no positive peaks. nthres : float Negative peak threshold. None for no negative peaks. d_struc : ndarray, optional @@ -276,7 +276,7 @@ def clusters(data, locations, pthres, nthres, d_struc=None, l_struc=None, # make a binary array of regions above/below the noise thresholds if pthres is None: # negative peaks only input = data < nthres - elif nthres is None: # postive peaks only + elif nthres is None: # positive peaks only input = data > pthres else: # both positive and negative input = np.bitwise_or(data < nthres, data > pthres) @@ -416,7 +416,7 @@ def find_all_thres(data, thres, msep, find_segs=False): thres : float Threshold value for minimum peak height msep : tuple - Tuple of minimum peak seperations along each axis. + Tuple of minimum peak separations along each axis. find_segs : bool, optional True to find segments and return a list of slices which select that segment. False performs no segmentation discovery. 
@@ -431,7 +431,7 @@ def find_all_thres(data, thres, msep, find_segs=False): """ locations = [] # create an empty list of peak locations - wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*seperation+1 + wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*separation+1 # loop over the windows for idx, s in ndwindow_index(data.shape, wsize): @@ -455,7 +455,7 @@ def find_all_nthres(data, thres, msep, find_segs=False): """ locations = [] # create an empty list of peak locations - wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*seperation+1 + wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*separation+1 # loop over the windows for idx, s in ndwindow_index(data.shape, wsize): @@ -473,7 +473,7 @@ def find_all_thres_fast(data, thres, msep, find_segs=False): """ Fast version of find_all_thres. See :py:func:`find_all_thres`. """ - wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*seperation+1 + wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*separation+1 # find local maxima mask mx = ndimage.maximum_filter(data, size=wsize, mode='constant') == data @@ -496,7 +496,7 @@ def find_all_nthres_fast(data, thres, msep, find_segs=False): """ Fast version of find_all_nthres_fast. See :py:func:`find_all_thres`. """ - wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*seperation+1 + wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*separation+1 # find local maxima mask mn = ndimage.minimum_filter(data, size=wsize, mode='constant') == data diff --git a/nmrglue/analysis/segmentation.py b/nmrglue/analysis/segmentation.py index 5dff6a7d..f814e185 100644 --- a/nmrglue/analysis/segmentation.py +++ b/nmrglue/analysis/segmentation.py @@ -39,7 +39,7 @@ def find_all_connected(data, thres, find_segs=False, diag=False): Returns ------- locations : list - List of indicies of local maximum in each segment. + List of indices of local maximum in each segment. seg_slices : list, optional List of slices which extract a given segment from the data. Only returned when fig_segs is True. @@ -96,7 +96,7 @@ def find_all_nconnected(data, thres, find_segs=False, diag=False): Returns ------- locations : list - List of indicies of local maximum in each segment. + List of indices of local maximum in each segment. seg_slices : list, optional List of slices which extract a given segment from the data. Only returned when fig_segs is True. @@ -129,7 +129,7 @@ def find_all_nconnected(data, thres, find_segs=False, diag=False): # intensity. This can be though of as all points accessible by a water drop # following downward slopes from the initial node. -# Upward segmentation uses the same priciple except nodes must be below +# Upward segmentation uses the same principle except nodes must be below # the threshold an upward path must exist. @@ -161,7 +161,7 @@ def mark_dseg(mdata, map, pt, mark, structure): while Q: pt = Q.pop(0) v = mdata.data[pt] - # Check all neightbors + # Check all neighbors for new_pt in neighbors(pt, mdata.shape, structure): if mdata.mask[new_pt] == False and mdata[new_pt] < v: Q.append(new_pt) @@ -223,7 +223,7 @@ def find_all_downward(data, thres, find_segs=False, diag=False): Returns ------- locations : list - List of indicies of local maximum in each segment. + List of indices of local maximum in each segment. seg_slices : list, optional List of slices which extract a given segment from the data. Only returned when fig_segs is True. 
@@ -279,7 +279,7 @@ def mark_useg(mdata, map, pt, mark, structure): while Q: pt = Q.pop(0) v = mdata.data[pt] - # Check all neightbors + # Check all neighbors for new_pt in neighbors(pt, mdata.shape, structure): if mdata.mask[new_pt] == False and mdata[new_pt] > v: Q.append(new_pt) @@ -343,7 +343,7 @@ def find_all_upward(data, thres, find_segs=False, diag=False): Returns ------- locations : list - List of indicies of local maximum in each segment. + List of indices of local maximum in each segment. seg_slices : list, optional List of slices which extract a given segment from the data. Only returned when fig_segs is True. @@ -393,7 +393,7 @@ def find_downward(data, pt, thres, diag=False): Returns ------- nodes : list - Indicies of downward-connected nodes. + Indices of downward-connected nodes. """ # build structure array for defining feature connections @@ -416,7 +416,7 @@ def find_downward(data, pt, thres, diag=False): while Q: # loop until Q is empty pt = Q.pop(0) # remove first element of queue v = data[pt] # value at current node - for new_pt in neighbors(pt, shape, structure): # check all neightbors + for new_pt in neighbors(pt, shape, structure): # check all neighbors if thres < data[new_pt] < v and new_pt not in segment: Q.append(new_pt) segment.append(new_pt) @@ -441,7 +441,7 @@ def find_connected(data, pt, thres, diag=False): Returns ------- nodes : list - Indicies of connected nodes. + Indices of connected nodes. """ # build structure array for defining feature connections @@ -463,7 +463,7 @@ def find_connected(data, pt, thres, diag=False): while Q: # loop until Q is empty pt = Q.pop(0) # remove first element of queue - for new_pt in neighbors(pt, shape, structure): # check all neightbors + for new_pt in neighbors(pt, shape, structure): # check all neighbors if data[new_pt] > thres and new_pt not in segment: Q.append(new_pt) segment.append(new_pt) @@ -488,7 +488,7 @@ def find_nconnected(data, pt, thres, diag=False): Returns ------- nodes : list - Indicies of connected nodes. + Indices of connected nodes. """ # build structure array for defining feature connections @@ -510,7 +510,7 @@ def find_nconnected(data, pt, thres, diag=False): while Q: # loop until Q is empty pt = Q.pop(0) # remove first element of queue - for new_pt in neighbors(pt, shape, structure): # check all neightbors + for new_pt in neighbors(pt, shape, structure): # check all neighbors if data[new_pt] < thres and new_pt not in segment: Q.append(new_pt) segment.append(new_pt) @@ -535,7 +535,7 @@ def find_upward(data, pt, thres, diag=False): Returns ------- nodes : list - Indicies of upward-connected nodes. + Indices of upward-connected nodes. 
""" # build structure array for defining feature connections @@ -558,7 +558,7 @@ def find_upward(data, pt, thres, diag=False): while Q: # loop until Q is empty pt = Q.pop(0) # remove first element of queue v = data[pt] # value at current node - for new_pt in neighbors(pt, shape, structure): # check all neightbors + for new_pt in neighbors(pt, shape, structure): # check all neighbors if thres > data[new_pt] > v and new_pt not in segment: Q.append(new_pt) segment.append(new_pt) diff --git a/nmrglue/fileio/bruker.py b/nmrglue/fileio/bruker.py index e4da6465..737d5435 100644 --- a/nmrglue/fileio/bruker.py +++ b/nmrglue/fileio/bruker.py @@ -625,7 +625,7 @@ def read_procs_file(dir='.', procs_files=None): procs_files.append(pf) else: - # proc paths were explicitely given + # proc paths were explicitly given # just check if they exists for i, f in enumerate(procs_files): @@ -982,7 +982,7 @@ def guess_shape(dic): elif aq_mod == 1 or aq_mod == 3: cplex = True else: - raise ValueError("Unknown Aquisition Mode") + raise ValueError("Unknown Acquisition Mode") # file size try: @@ -1046,7 +1046,7 @@ def guess_shape(dic): shape[1] = fsize // (shape[3] * shape[2] * 4) shape[0] = fsize // (shape[3] * shape[2] * shape[1] * 4) - # if there in no pulse program parameters in dictionary return currect + # if there in no pulse program parameters in dictionary return current # shape after removing zeros if "pprog" not in dic or "loop" not in dic["pprog"]: return tuple([int(i) for i in shape if i > 1]), cplex @@ -1358,7 +1358,7 @@ def guess_shape_and_submatrix_shape(dic): """ Guess the data shape and the shape of the processed data submatrix. """ - if 'procs' not in dic: # unknow dimensionality and shapes + if 'procs' not in dic: # unknown dimensionality and shapes return None, None procs = dic['procs'] @@ -1610,7 +1610,7 @@ def write_binary(filename, dic, data, overwrite=False, big=True, # open the file for writing f = fileiobase.open_towrite(filename, overwrite=overwrite) - # convert objec to an array if it is not already one... + # convert object to an array if it is not already one... if not isinstance(data, np.ndarray): data = np.array(data) @@ -1737,7 +1737,7 @@ def __fgetitem__(self, slices): slices is a well formatted tuple of slices """ - # seperate the last slice from the first slices + # separate the last slice from the first slices lslice = slices[-1] fslice = slices[:-1] @@ -1745,7 +1745,7 @@ def __fgetitem__(self, slices): lfshape = self.fshape[-1] ffshape = self.fshape[:-1] - # find the output size and make a in/out nd interator + # find the output size and make a in/out nd iterator osize, nd_iter = fileiobase.size_and_ndtofrom_iter(ffshape, fslice) osize.append(len(range(lfshape)[lslice])) @@ -1977,7 +1977,7 @@ def remove_digital_filter(dic, data, truncate=True, post_proc=False): This typically produces a better looking spectrum but may remove useful data. False uses a non-truncated phase. post_proc : bool, optional - True if the digitial filter is to be removed post processing, i.e after + True if the digital filter is to be removed post processing, i.e after fourier transformation. The corrected FID will not be returned, only a corrected spectrum in the frequency dimension will be returned @@ -2032,7 +2032,7 @@ def rm_dig_filter( This typically produces a better looking spectrum but may remove useful data. False uses a non-truncated grpdly value. 
post_proc : bool, optional - True if the digitial filter is to be removed post processing, i.e after + True if the digital filter is to be removed post processing, i.e after fourier transformation. The corrected time domain data will not be returned, only the corrected spectrum in the frequency dimension will be returned @@ -2061,7 +2061,7 @@ def rm_dig_filter( # 1. FFT the data # 2. Apply a negative first order phase to the data. The phase is # determined by the GRPDLY parameter or found in the DSPFVS/DECIM - # loopup table. + # lookup table. # 3. Inverse FFT # (these first three steps are a frequency shift with a FFT first, fsh2) # 4. Round the applied first order phase up by two integers. For example @@ -2208,7 +2208,7 @@ def parse_jcamp_line(line, f): value = [] rline = line[line.index(")") + 1:] - # extract value from remainer of line + # extract value from remainder of line for t in rline.split(): value.append(parse_jcamp_value(t)) @@ -2419,7 +2419,7 @@ def read_pprog(filename): # open the file f = open(filename, 'r') - # initilize lists and dictionaries + # initialize lists and dictionaries var = dict() loop = [] incr = [[]] @@ -2427,7 +2427,7 @@ def read_pprog(filename): ph_extra = [[]] # loop over lines in pulseprogram looking for loops, increment, - # assigments and phase commands + # assignments and phase commands for line in f: # split line into comment and text and strip leading/trailing spaces @@ -2450,7 +2450,7 @@ def read_pprog(filename): # print(line,"--Blank, Comment or Include") continue - # see if we have quotes and have an assigment + # see if we have quotes and have an assignment # syntax "foo=bar" # add foo:bar to var dictionary if "\"" in text: diff --git a/nmrglue/fileio/fileiobase.py b/nmrglue/fileio/fileiobase.py index 1f3d58f9..168a99dd 100644 --- a/nmrglue/fileio/fileiobase.py +++ b/nmrglue/fileio/fileiobase.py @@ -494,7 +494,7 @@ def index2trace_opp(shape, index): # deal with the phase component phases = [v % 2 for v in index] nphase = index2trace_flat([2] * n, phases[::-1]) - # deal with the remainer + # deal with the remainder pindex = [v // 2 for v in index] pshape = [i // 2 for i in shape] nbase = index2trace_flat(pshape, pindex) @@ -524,7 +524,7 @@ def index2trace_reg(shape, index): # deal with the phase component phases = [v % 2 for v in index] nphase = index2trace_flat([2] * n, phases) - # deal with the remainer + # deal with the remainder pindex = [v // 2 for v in index] pshape = [i // 2 for i in shape] nbase = index2trace_flat(pshape, pindex) @@ -656,7 +656,7 @@ def __getitem__(self, key): else: rlist[i] = slice(v, v + 1, 1) - # pad the list with additional dimentions + # pad the list with additional dimensions for i in range(len(rlist), self.ndim): rlist.append(slice(None)) diff --git a/nmrglue/fileio/glue.py b/nmrglue/fileio/glue.py index 9c5d5f26..3691f291 100644 --- a/nmrglue/fileio/glue.py +++ b/nmrglue/fileio/glue.py @@ -4,7 +4,7 @@ glue files are HDF5 files with the spectral data stored in a dataset names 'spectrum' and any parameters stored in the dataset attributes. At minimum -the parameter dictionary must contain a ndim key with the dimentionality of +the parameter dictionary must contain a ndim key with the dimensionality of the data and a dictionry for each axis numbered (0,1,2...) 
with the following keys: @@ -35,7 +35,7 @@ def make_uc(dic, data, dim=-1): make a unit conversion object """ if dim == -1: - dim = data.ndim - 1 # last dimention + dim = data.ndim - 1 # last dimension size = dic[dim]["size"] cplex = dic[dim]["complex"] @@ -79,7 +79,7 @@ def read(filename, dataset="spectrum"): def read_lowmem(filename, dataset="spectrum"): """ - Read a glue file using mimimal memory usage + Read a glue file using minimal memory usage """ f = h5py.File(filename, 'r') dic = get_dic(f, dataset) @@ -206,7 +206,7 @@ def __fgetitem__(self, slices): """ Return ndarray of selected values - (sY,sX) is a well formated tuple of slices + (sY,sX) is a well formatted tuple of slices """ sY, sY = slices return self.Dataset[sY, sX] @@ -250,7 +250,7 @@ def __fgetitem__(self, slices): """ Return ndarray of selected values - (sZ, sY, sX) is a well formated tuple of slices + (sZ, sY, sX) is a well formatted tuple of slices """ sZ, sY, sX = slices return self.Dataset[sZ, sY, sX] diff --git a/nmrglue/fileio/jcampdx.py b/nmrglue/fileio/jcampdx.py index 10d26915..2bc0cb5d 100644 --- a/nmrglue/fileio/jcampdx.py +++ b/nmrglue/fileio/jcampdx.py @@ -96,7 +96,7 @@ def _readrawdic(filename): # try to split to key and value and check sanity keysplit = actual.split("=", 1) if len(keysplit) < 2: - warn("Bad JCAMP-DX line, cant split key and value correctly:" + + warn("Bad JCAMP-DX line, can't split key and value correctly:" + line) continue keystr = keysplit[0][2:] # remove "##" already here diff --git a/nmrglue/fileio/nmrml.py b/nmrglue/fileio/nmrml.py index 1902de06..4af0c942 100644 --- a/nmrglue/fileio/nmrml.py +++ b/nmrglue/fileio/nmrml.py @@ -19,7 +19,7 @@ def read(filename, data_dtype=None): Name of nmrML file to read. data_dtype : str, optional NumPy data type of the data. None, the default, will determine this - data type from the infomation in the file. Occasionally this + data type from the information in the file. Occasionally this information is incorrect and this argument can be used to explicitly supply this information. diff --git a/nmrglue/fileio/pipe.py b/nmrglue/fileio/pipe.py index 87f17d91..53133811 100644 --- a/nmrglue/fileio/pipe.py +++ b/nmrglue/fileio/pipe.py @@ -163,12 +163,12 @@ def make_uc(dic, data, dim=-1): """ if dim == -1: - dim = data.ndim - 1 # last dimention + dim = data.ndim - 1 # last dimension fn = "FDF" + str(int(dic["FDDIMORDER"][data.ndim - 1 - dim])) size = float(data.shape[dim]) - # check for quadrature in indirect dimentions + # check for quadrature in indirect dimensions if (dic[fn + "QUADFLAG"] != 1) and (dim != data.ndim - 1): size = size / 2. cplx = True @@ -235,7 +235,7 @@ def guess_udic(dic, data): for i in range(data.ndim): udic[i]["size"] = data.shape[i] # size from data shape - # determind NMRPipe axis name + # determine NMRPipe axis name fn = "FDF" + str(int(dic["FDDIMORDER"][data.ndim - 1 - i])) # directly corresponding @@ -274,7 +274,7 @@ def guess_udic(dic, data): def create_dic(udic, datetimeobj=datetime.datetime.now()): """ - Crate a NMRPipe parameter dictionary from universal dictionary + Create a NMRPipe parameter dictionary from universal dictionary This function does not update the dictionary keys that are unknown such as MIN/MAX, apodization and processing parameters, and sizes in none-current @@ -329,7 +329,7 @@ def add_axis_to_dic(dic, adic, n): """ Add an axis dictionary (adic) to a NMRPipe dictionary (dic) as axis n. """ - # determind F1,F2,F3,... + # determine F1,F2,F3,... 
fn = ["FDF2", "FDF1", "FDF3", "FDF4"][n] # parameter directly in dictionary @@ -430,7 +430,7 @@ def create_empty_dic(): dic["FDSPECNUM"] = 1. dic["FDFILECOUNT"] = 1. dic["FD2DVIRGIN"] = 1. - # dimention ordering + # dimension ordering dic["FDDIMORDER1"] = 2.0 dic["FDDIMORDER2"] = 1.0 @@ -598,7 +598,7 @@ def read_lowmem(filename): if order == 4: return read_lowmem_4D(filemask) - raise ValueError('unknown dimentionality: %s' % order) + raise ValueError('unknown dimensionality: %s' % order) # dimension specific reading @@ -1182,7 +1182,7 @@ def write_slice_3D(filemask, dic, data, shape, slices): # - Untranspose if dic["TRANSPOSED"] == 1 (call pipe_proc.tp) # - transpose (1,2,0) # - ORDER 1,2,3 = 3,1,2 and array -# - update "FDSLICECOUNT" and "FDSIZE" taking into accound complex packing +# - update "FDSLICECOUNT" and "FDSIZE" taking into account complex packing # - also update "FDSPECNUM" # - call write_slice3D # - store shape as self.max_iter @@ -1225,7 +1225,7 @@ def transpose_3D(dic, data, axes=(2, 1, 0)): # transpose the dictionary s3 = "FDDIMORDER" + str(int(3 - a1)) # 3rd axis is 0th axis in data_nd s2 = "FDDIMORDER" + str(int(3 - a2)) # 2nd axis is 1st axis in data_nd - s1 = "FDDIMORDER" + str(int(3 - a3)) # 1st axis is 3nd axis in data_nd + s1 = "FDDIMORDER" + str(int(3 - a3)) # 1st axis is 3rd axis in data_nd rdic["FDDIMORDER1"] = dic[s1] rdic["FDDIMORDER2"] = dic[s2] @@ -1512,7 +1512,7 @@ def unappend_data(data): def append_data(data): """ - Return data with last axis (-1) appeneded. + Return data with last axis (-1) appended. Data should be complex @@ -1746,7 +1746,7 @@ def __fgetitem__(self, slices): """ Return ndarray of selected values. - (sY, sX) is a well formated tuple of slices + (sY, sX) is a well formatted tuple of slices """ sY, sX = slices f = open(self.filename, 'rb') # open the file for reading @@ -1870,7 +1870,7 @@ def __fgetitem__(self, slices): """ Return ndarray of selected values - (sZ, sY, sX) is a well formated tuple of slices + (sZ, sY, sX) is a well formatted tuple of slices """ sZ, sY, sX = slices # determine which objects should be selected @@ -1954,7 +1954,7 @@ def __fgetitem__(self, slices): """ Return ndarray of selected values - (sZ, sY, sX) is a well formated tuple of slices + (sZ, sY, sX) is a well formatted tuple of slices """ sZ, sY, sX = slices f = open(self.filename, 'rb') # open the file for reading @@ -2088,7 +2088,7 @@ def __fgetitem__(self, slices): """ Return ndarray of selected values - (sZ, sY, sX) is a well formated tuple of slices + (sZ, sY, sX) is a well formatted tuple of slices """ sA, sZ, sY, sX = slices @@ -2121,7 +2121,7 @@ def __fgetitem__(self, slices): class pipestream_4d(fileiobase.data_nd): """ Emulate a ndarray objects without loading data into memory for low memory - reading of 4D NMRPipe data steams (one file 4D data sets). + reading of 4D NMRPipe data streams (one file 4D data sets). * slicing operations return ndarray objects. * can iterate over with expected results. 
@@ -2181,7 +2181,7 @@ def __fgetitem__(self, slices): """ Return ndarray of selected values - (sA, sZ, sY, sX) is a well formated tuple of slices + (sA, sZ, sY, sX) is a well formatted tuple of slices """ sA, sZ, sY, sX = slices diff --git a/nmrglue/fileio/rnmrtk.py b/nmrglue/fileio/rnmrtk.py index ea35aeaa..ad08e89b 100644 --- a/nmrglue/fileio/rnmrtk.py +++ b/nmrglue/fileio/rnmrtk.py @@ -1,5 +1,5 @@ """ -Fuctions for reading and writing Rowland NMR Toolkit (RNMRTK) files +Functions for reading and writing Rowland NMR Toolkit (RNMRTK) files """ from __future__ import division @@ -31,7 +31,7 @@ def make_uc(dic, data, dim=-1): data : ndarray Array of NMR data. dim : int, optional - Demension number to create unit conversion object for. Default is for + Dimension number to create unit conversion object for. Default is for the last dimension. Returns @@ -599,7 +599,7 @@ def __fgetitem__(self, slices): slices is a well formatted tuple of slices """ - # seperate the last slice from the leading slices + # separate the last slice from the leading slices lslice = slices[-1] fslice = slices[:-1] @@ -798,7 +798,7 @@ def parse_par_line(line, dic): dom = [s[0] for s in pl] # dom as it appears in the file dic['ndim'] = ndim = len(pl) dic['order'] = order = [int(s[1]) for s in pl] # dimension order - # dom in accending order (to match other parameter) + # dom in ascending order (to match other parameter) dic['dom'] = [dom[order.index(i)] for i in range(1, ndim + 1)] elif c == 'N': diff --git a/nmrglue/fileio/simpson.py b/nmrglue/fileio/simpson.py index 255f0db0..1ebafecd 100644 --- a/nmrglue/fileio/simpson.py +++ b/nmrglue/fileio/simpson.py @@ -136,7 +136,7 @@ def read_text(filename): """Read a SIMPSON text file. See :py:func:`read`.""" f = open(filename, 'r') # open the file - dic = {} # initalize dictionary of parameters + dic = {} # initialize dictionary of parameters # parse the header of the file, storing parameters in dictionary, stop # when we hit the data line @@ -242,7 +242,7 @@ def read_xyreim(filename): units = np.recarray((NI, NP), dtype=[('ni_unit', 'f8'), ('np_unit', 'f8')]) for l_idx, line in enumerate(f): - ni_idx, np_idx = divmod(l_idx, NP + 1) # determine indicies + ni_idx, np_idx = divmod(l_idx, NP + 1) # determine indices if np_idx == NP: # skip blank line between blocks continue # unpack the line and store @@ -308,7 +308,7 @@ def read_binary(filename): """Read a binary SIMPSON file. See :py:func:`read`.""" f = open(filename, 'r') # open the file - dic = {} # initalize dictionary of parameters + dic = {} # initialize dictionary of parameters # parse the header of the file, storing parameters in dictionary, stop # when we hit the data line diff --git a/nmrglue/fileio/sparky.py b/nmrglue/fileio/sparky.py index f7beb8e0..51a388b0 100644 --- a/nmrglue/fileio/sparky.py +++ b/nmrglue/fileio/sparky.py @@ -50,7 +50,7 @@ def make_uc(dic, data, dim=-1): """ if dim == -1: - dim = data.ndim - 1 # last dimention + dim = data.ndim - 1 # last dimension wdic = dic["w" + str(int(1 + dim))] @@ -127,7 +127,7 @@ def create_dic(udic, datetimeobj=datetime.datetime.now(), user='user'): """ dic = dict() - # determind shape of array + # determine shape of array shape = [udic[k]["size"] for k in range(udic["ndim"])] # populate the dictionary @@ -232,7 +232,7 @@ def calc_tshape(shape, kbyte_max=128): Shape of tile. """ - # Algorithm divides each dimention by 2 until under kbyte_max tile size. + # Algorithm divides each dimension by 2 until under kbyte_max tile size. 
+ # Algorithm divides each dimension by 2 until under kbyte_max tile size.
s = np.array(shape, dtype="int") i = 0 while (s.prod() * 4. / 1024. > kbyte_max): @@ -269,7 +269,7 @@ def read(filename): # open the file f = open(filename, 'rb') - # determind the dimentionality + # determine the dimensionality n = fileheader2dic(get_fileheader(f))["naxis"] f.close() @@ -280,7 +280,7 @@ def read(filename): if n == 4: return read_4D(filename) - raise ValueError("unknown dimentionality: %s" % n) + raise ValueError("unknown dimensionality: %s" % n) def read_lowmem(filename): @@ -302,13 +302,13 @@ def read_lowmem(filename): See Also -------- read : Read a Sparky file. - write_lowmem : Write a Sparky file using mimimal memory. + write_lowmem : Write a Sparky file using minimal memory. """ # open the file f = open(filename, 'rb') - # determind the dimentionality + # determine the dimensionality n = fileheader2dic(get_fileheader(f))["naxis"] f.close() @@ -317,7 +317,7 @@ def read_lowmem(filename): if n == 3: return read_lowmem_3D(filename) - raise ValueError("unknown dimentionality: %s" % n) + raise ValueError("unknown dimensionality: %s" % n) def write(filename, dic, data, overwrite=False): @@ -349,7 +349,7 @@ def write(filename, dic, data, overwrite=False): if n == 3: return write_3D(filename, dic, data, overwrite=overwrite) - raise ValueError("unknown dimentionality: %s" % n) + raise ValueError("unknown dimensionality: %s" % n) def write_lowmem(filename, dic, data, overwrite=False): @@ -371,7 +371,7 @@ def write_lowmem(filename, dic, data, overwrite=False): See Also -------- write : Write a Sparky file. - read_lowmem : Read a Sparky file using mimimal amounts of memory. + read_lowmem : Read a Sparky file using minimal amounts of memory. """ # write also writes tile by tile... @@ -900,7 +900,7 @@ class sparky_2d(fileiobase.data_nd): filename : str Filename of 2D Sparky file. order : tuple, optional - Order of axes against file. None is equivelent to (0, 1). + Order of axes against file. None is equivalent to (0, 1). """ @@ -985,7 +985,7 @@ def __fgetitem__(self, slices): minY = iY * self.lentY maxY = (iY + 1) * self.lentY - # determind what elements are needed from this tile + # determine what elements are needed from this tile XinX = [i for i in gX if maxX > i >= minX] # values in gX XinT = [i - minX for i in XinX] # tile index values XinO = [gX.index(i) for i in XinX] # output indexes @@ -1133,7 +1133,7 @@ def __fgetitem__(self, slices): minZ = iZ * self.lentZ maxZ = (iZ + 1) * self.lentZ - # determind what elements are needed from this tile + # determine what elements are needed from this tile XinX = [i for i in gX if maxX > i >= minX] # values in gX XinT = [i - minX for i in XinX] # tile index values XinO = [gX.index(i) for i in XinX] # output indexes @@ -1199,7 +1199,7 @@ def get_tilen(f, n_tile, tw_tuple): position is later needed. 
""" - # determind the size of the tile in bytes + # determine the size of the tile in bytes tsize = 4 for i in tw_tuple: tsize = tsize * i @@ -1230,7 +1230,7 @@ def untile_data4D(data, tile_size, data_size): lentA, lentZ, lentY, lentX = tile_size lenA, lenZ, lenY, lenX = data_size - # determind the number of tiles in data + # determine the number of tiles in data ttX = int(np.ceil(lenX / float(lentX))) # total tiles in X dim ttY = int(np.ceil(lenY / float(lentY))) # total tiles in Y dim ttZ = int(np.ceil(lenZ / float(lentZ))) # total tiles in Z dim @@ -1357,7 +1357,7 @@ def find_tilen_2d(data, ntile, tile_size): Xt = ntile % ttX Yt = int(np.floor(ntile / ttX)) - # dimention limits + # dimension limits Xmin = int(Xt * lentX) Xmax = int((Xt + 1) * lentX) @@ -1394,7 +1394,7 @@ def tile_data2d(data, tile_size): """ lentY, lentX = tile_size - # determind the number of tiles in data + # determine the number of tiles in data ttX = int(np.ceil(data.shape[1] / float(lentX))) # total tiles in X dim ttY = int(np.ceil(data.shape[0] / float(lentY))) # total tiles in Y dim tt = ttX * ttY # total number of tiles @@ -1433,7 +1433,7 @@ def untile_data2D(data, tile_size, data_size): """ lentY, lentX = tile_size lenY, lenX = data_size - # determind the number of tiles in data + # determine the number of tiles in data ttX = int(np.ceil(lenX / float(lentX))) # total tiles in X dim ttY = int(np.ceil(lenY / float(lentY))) # total tiles in Y dim tt = ttX * ttY @@ -1504,7 +1504,7 @@ def find_tilen_3d(data, ntile, tile_size): Yt = int(np.floor(ntile / ttX)) % ttY Zt = int(np.floor(ntile / (ttX * ttY))) - # dimention limits + # dimension limits Xmin = int(Xt * lentX) Xmax = int((Xt + 1) * lentX) @@ -1544,7 +1544,7 @@ def tile_data3d(data, tile_size): """ lentZ, lentY, lentX = tile_size - # determind the number of tiles in data + # determine the number of tiles in data ttX = int(np.ceil(data.shape[2] / float(lentX))) # total tiles in X dim ttY = int(np.ceil(data.shape[1] / float(lentY))) # total tiles in Y dim ttZ = int(np.ceil(data.shape[0] / float(lentZ))) # total tiles in Z dim @@ -1585,7 +1585,7 @@ def untile_data3D(data, tile_size, data_size): lentZ, lentY, lentX = tile_size lenZ, lenY, lenX = data_size - # determind the number of tiles in data + # determine the number of tiles in data ttX = int(np.ceil(lenX / float(lentX))) # total tiles in X dim ttY = int(np.ceil(lenY / float(lentY))) # total tiles in Y dim ttZ = int(np.ceil(lenZ / float(lentZ))) # total tiles in Z dim @@ -1629,7 +1629,7 @@ def get_fileheader(f): Reads the 180 byte file header of a Sparky file """ - # file header as descriped in ucsffile.cc of sparky source + # file header as described in ucsffile.cc of sparky source # header is packed as follows: # ident(10s),naxis(c),ncomponents(c),encoding(c),version(c) # owner(9s),date(26s),comment(80s),pad(3x),seek_pos(l),scratch(40s), diff --git a/nmrglue/fileio/spinsolve.py b/nmrglue/fileio/spinsolve.py index 66954d9b..0100e199 100644 --- a/nmrglue/fileio/spinsolve.py +++ b/nmrglue/fileio/spinsolve.py @@ -40,7 +40,7 @@ def parse_spinsolve_par_line(line): """ - Parse lines in acqu.par and return a tuple (paramter name, parameter value) + Parse lines in acqu.par and return a tuple (parameter name, parameter value) """ line = line.strip() # Drop newline name, value = line.split("=", maxsplit=1) # Split at equal sign (and ignore further equals in attribute values) diff --git a/nmrglue/fileio/table.py b/nmrglue/fileio/table.py index 8d5c2c1f..ec75b5f7 100644 --- a/nmrglue/fileio/table.py +++ 
b/nmrglue/fileio/table.py @@ -171,10 +171,10 @@ def write(filename, comments, rec, overwrite=False): for c in comments: f.write(c) - # Determind the list of column names + # Determine the list of column names names = rec.dtype.names - # Determind the list of column formats + # Determine the list of column formats sizes = [rec.dtype[n].itemsize for n in names] kinds = [rec.dtype[n].kind for n in names] formats = [k + str(s) for s, k in zip(sizes, kinds)] diff --git a/nmrglue/fileio/tests/test_fileiobase.py b/nmrglue/fileio/tests/test_fileiobase.py index cabdceb9..0206ca65 100644 --- a/nmrglue/fileio/tests/test_fileiobase.py +++ b/nmrglue/fileio/tests/test_fileiobase.py @@ -17,7 +17,7 @@ def test_uc_from_freqscale(): """ from nmrglue.fileio.fileiobase import uc_from_freqscale - # read fequency test data + # read frequency test data dic, data = ng.pipe.read(NMRPIPE_1D_FREQ) # make udic and uc using uc_from_udic diff --git a/nmrglue/fileio/varian.py b/nmrglue/fileio/varian.py index 24af37bb..d5ed7f7d 100644 --- a/nmrglue/fileio/varian.py +++ b/nmrglue/fileio/varian.py @@ -221,7 +221,7 @@ def read(dir=".", fid_file="fid", procpar_file="procpar", read_blockhead=False, which is typically fine for most NMR experiments. See below for additional details. as_2d : bool, optional - True to return data as a 2D array ignorning the shape and torder + True to return data as a 2D array ignoring the shape and torder parameters. Returns ------- @@ -256,7 +256,7 @@ def read(dir=".", fid_file="fid", procpar_file="procpar", read_blockhead=False, See Also -------- - read_lowmem : Read Agilent/Varian files using mimimal amounts of memory. + read_lowmem : Read Agilent/Varian files using minimal amounts of memory. write : Write Agilent/Varian files. """ @@ -319,7 +319,7 @@ def read_lowmem(dir=".", fid_file="fid", procpar_file="procpar", See Also -------- read : Read Agilent/Varian files. - write_lowmem : Write Agilent/Varian files using mimimal memory + write_lowmem : Write Agilent/Varian files using minimal memory """ if os.path.isdir(dir) is False: @@ -375,7 +375,7 @@ def write(dir, dic, data, fid_file="fid", procpar_file="procpar", See Also -------- - write_lowmem : Write Agilent/Varian files using mimimal memory + write_lowmem : Write Agilent/Varian files using minimal memory read : Read Agilent/Varian files. """ @@ -395,7 +395,7 @@ def write(dir, dic, data, fid_file="fid", procpar_file="procpar", def write_lowmem(dir, dic, data, fid_file="fid", procpar_file="procpar", torder=None, repack=False, overwrite=False): """ - Write Agilent/Varian files to a directory using mimimal amounts of memory. + Write Agilent/Varian files to a directory using minimal amounts of memory. Parameters ---------- @@ -619,7 +619,7 @@ def order_data(data, torder): if torder == 'flat' or torder == 'f': return data.reshape(nshape) - # make an emprt array to hold the 2D disk formated data matrix + # make an empty array to hold the 2D disk formatted data matrix ndata = np.empty(nshape, dtype=data.dtype) # index2tuple converter @@ -735,7 +735,7 @@ def read_fid(filename, shape=None, torder='flat', as_2d=False, def read_fid_lowmem(filename, shape=None, torder='flat', as_2d=False, read_blockhead=False): """ - Read a Agilent/Varian binary (fid) file using mimimal amounts of memory. + Read a Agilent/Varian binary (fid) file using minimal amounts of memory.
Parameters ---------- @@ -894,7 +894,7 @@ def write_fid(filename, dic, data, torder='flat', repack=False, correct=True, See Also -------- - write_fid_lowmem : Write a Agilent/Varian binary file using mimimal + write_fid_lowmem : Write a Agilent/Varian binary file using minimal amounts of memory write : Write Agilent/Varian files to a directory. @@ -928,7 +928,7 @@ def write_fid(filename, dic, data, torder='flat', repack=False, correct=True, # write the fileheader to file put_fileheader(f, dic2fileheader(dic)) - # determind data type + # determine data type dt = find_dtype(dic) if "blockheader" in dic and len(dic["blockheader"]) == data.shape[0]: @@ -955,7 +955,7 @@ def write_fid(filename, dic, data, torder='flat', repack=False, correct=True, def write_fid_lowmem(filename, dic, data, torder='f', repack=False, overwrite=False): """ - Write a Agilent/Varian binary (fid) file using mimimal amounts of memory. + Write a Agilent/Varian binary (fid) file using minimal amounts of memory. File is written trace by trace with each trace read from data before writing to reduce memory usage. @@ -1013,7 +1013,7 @@ def write_fid_lowmem(filename, dic, data, torder='f', repack=False, # write the fileheader to file put_fileheader(f, dic2fileheader(dic)) - # determind data type + # determine data type dt = find_dtype(dic) if "blockheader" in dic and len(dic["blockheader"]) == dic["nblocks"]: @@ -1834,7 +1834,7 @@ def uninterleave_data(data): ========== ============ """ - # determind the output dtype + # determine the output dtype rdt = data.dtype.name if rdt == 'int16' or rdt == "float32": @@ -2063,7 +2063,7 @@ def __fgetitem__(self, slices): slices is a well formatted tuple of slices """ - # seperate the last slice from the first slices + # separate the last slice from the first slices lslice = slices[-1] fslice = slices[:-1] @@ -2071,7 +2071,7 @@ def __fgetitem__(self, slices): lfshape = self.fshape[-1] ffshape = self.fshape[:-1] - # find the output size and make a in/out nd interator + # find the output size and make a in/out nd iterator osize, nd_iter = fileiobase.size_and_ndtofrom_iter(ffshape, fslice) osize.append(len(range(lfshape)[lslice])) @@ -2089,7 +2089,7 @@ def __fgetitem__(self, slices): # seek to the correct place in the file f.seek(ntrace * self.bbytes + 32) - # retrive trace and save to output + # retrieve trace and save to output trace = get_block(f, self.pts, self.nbh, self.fdtype, False) trace = uninterleave_data(trace) out[out_index] = trace[lslice] diff --git a/nmrglue/process/pipe_proc.py b/nmrglue/process/pipe_proc.py index c3e33bdd..1cda1e27 100644 --- a/nmrglue/process/pipe_proc.py +++ b/nmrglue/process/pipe_proc.py @@ -23,7 +23,7 @@ * ann Fourier Analysis by Neural Net * ebs EBS Reconstruction * mem Maximum Entropy - * ml Maximum likelyhood frequency + * ml Maximum likelihood frequency * poly Polynomail baseline correction * xyz2zyx 3D matrix transpose * ztp 3D matrix transpose @@ -84,12 +84,12 @@ def make_uc(dic, data, dim=-1): """ if dim == -1: - dim = data.ndim - 1 # last dimention + dim = data.ndim - 1 # last dimension fn = "FDF" + str(int(dic["FDDIMORDER"][data.ndim - 1 - dim])) size = float(data.shape[dim]) - # check for quadrature in indirect dimentions + # check for quadrature in indirect dimensions if (dic[fn + "QUADFLAG"] != 1) and (dim != data.ndim - 1): size = size / 2. 
cplx = True @@ -1031,7 +1031,7 @@ def fsh(dic, data, dir, pts, sw=True): Notes ----- - This function does not perfrom a Hilbert transfrom when data is complex, + This function does not perform a Hilbert transform when data is complex, NMRPipe's FSH function appear to. As such the results of the imaginary channel differs from NMRPipe. In addition MAX/MIN value are slightly different than those in NMRPipe. @@ -1085,7 +1085,7 @@ def ft(dic, data, auto=False, real=False, inv=False, alt=False, neg=False, data : ndarray Array of NMR data. auto : bool - True will choose mode automatically, not recomended. + True will choose mode automatically, not recommended. real : bool True to transform real-only data. inv : bool @@ -1432,7 +1432,7 @@ def ps(dic, data, p0=0.0, p1=0.0, inv=False, hdr=False, noup=False, ht=False, hdr : bool True to use phasing parameters from dic. noup : bool - True to not update phasing paramters in returned ndic. + True to not update phasing parameters in returned ndic. ht : bool True to perform a Hilbert transform to reconstruction imaginaries before phasing. @@ -1494,9 +1494,9 @@ def tp(dic, data, hyper=False, nohyper=False, auto=False, nohdr=False): data : ndarray Array of NMR data. hyper : bool - True to perfrom hypercomplex transpose. + True to perform hypercomplex transpose. nohyper : bool - True to supress hypercomplex transpose. + True to suppress hypercomplex transpose. auto : bool True to choose transpose mode automatically. nohdr : bool @@ -1531,7 +1531,7 @@ def tp(dic, data, hyper=False, nohyper=False, auto=False, nohdr=False): # unpack complex as needed data = np.array(p.c2ri(data), dtype="complex64") - # update the dimentionality and order + # update the dimensionality and order dic["FDSLICECOUNT"] = data.shape[0] if (data.dtype == 'float32') and (nohyper is True): # when nohyper is True and the new last dimension was complex @@ -1793,7 +1793,7 @@ def sol(dic, data, mode="low", fl=16, fs=1, head=0): data : ndarray Array of NMR data. mode : {'low'} - Filter mode. Currenlty only 'low' is implemented. + Filter mode. Currently only 'low' is implemented. fl : int Length of filter in points. fs : {1, 2, 3} @@ -2251,7 +2251,7 @@ def mir(dic, data, mode="left", invl=False, invr=False, sw=True): def mult(dic, data, r=1.0, i=1.0, c=1.0, inv=False, hdr=False, x1=1.0, xn='default'): """ - Multiple by a constant. + Multiply by a constant. Parameters ---------- @@ -2438,11 +2438,11 @@ def shuf(dic, data, mode=None): string Description ======= =================================== 'ri2c' Interleave real and imaginary data. - 'c2ri' Seperate real and imaginary data. + 'c2ri' Separate real and imaginary data. 'ri2rr' Append real and imaginary data. 'rr2ri' Unappend real and imaginary data. - 'exlr' Exchange left and right halfs. - 'rolr' Rotate left and right halfs. + 'exlr' Exchange left and right halves. + 'rolr' Rotate left and right halves. 'swap' Swap real and imaginary data. 'bswap' Byte-swap data. 'inv' Do nothing. 
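The shuffle table above is easiest to see on a tiny array. A short sketch of the 'ri2c'/'c2ri' round trip, assuming (as the hunks in this patch suggest) that the proc_base helper names mirror the shuf modes:

    import numpy as np
    import nmrglue.process.proc_base as p

    x = np.array([1 + 2j, 3 + 4j], dtype="complex64")
    flat = p.ri2c(x)     # interleave real/imag -> [1., 2., 3., 4.]
    back = p.c2ri(flat)  # separate back into complex -> [1.+2.j, 3.+4.j]
    assert np.allclose(back, x)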
@@ -2461,7 +2461,7 @@ def shuf(dic, data, mode=None): dic["FDSIZE"] = data.shape[-1] dic["FDREALSIZE"] = data.shape[-1] elif mode == "c2ri": - # seperate real and imaginary + # separate real and imaginary data = np.array(p.c2ri(data), dtype="complex64") # update the dictionary dic["FDQUADFLAG"] = 0.0 @@ -2549,7 +2549,7 @@ def sign(dic, data, ri=False, r=False, i=False, left=False, right=False, Notes ----- - All sign manupulation modes set True are applied in the order they appear + All sign manipulation modes set True are applied in the order they appear in the function parameter list. """ @@ -2692,7 +2692,7 @@ def img(dic, data, filter, dx=1.0, dy=1.0, kern=[1], conv=False, thres=None): ====== ================== median Median min Minimum - max Maximim + max Maximum amin Absolute Minimum amax Absolute Maximum range Range @@ -2707,7 +2707,7 @@ def img(dic, data, filter, dx=1.0, dy=1.0, kern=[1], conv=False, thres=None): thres = 0.0 # default value of 0.0 data = p.thres(data, thres) - if conv: # convolution with kernal + if conv: # convolution with kernel data = p.conv(data, kern, m="wrap") dic = update_minmax(dic, data) return dic, data @@ -2921,7 +2921,7 @@ def smo(dic, data, n=1, center=False): """ a = p.smo(data, n=n) - # NMRPipe doesn't truely smooth the left edge of the vector + # NMRPipe doesn't truly smooth the left edge of the vector for i in range(n): a[..., i] = data[..., 0:(n + i)].sum(axis=-1) / (n + 1 + i) if center: @@ -3148,11 +3148,11 @@ def lp2d(dic, data, xOrd=8, yOrd=8, xSize="default", ySize="default", ----- This function applies the LP2D procedure as described in: G. Zhu and A. Bax, Journal of Magnetic Resonance, 1992, 98, 192-199. - to the data matrix. The parameters and algorith used in NMRPipe's LP2D + to the data matrix. The parameters and algorithm used in NMRPipe's LP2D function are not well documented and are not replicated here. """ - # determind how many points to predict in each dimension + # determine how many points to predict in each dimension if xSize == "default": xpred = data.shape[1] else: @@ -3234,7 +3234,7 @@ def mac(dic, data, macro=None, noRd=False, noWr=False, all=False, **kwargs): is to act as a dispatch mechanism to other Python code so that the look and feel of nmrPipe is maintained. The -var and -str parameters are not used, as they can be passed directly to the macro as keyword - arguements. + arguments. """ if macro is None: return dic, data diff --git a/nmrglue/process/proc_autophase.py b/nmrglue/process/proc_autophase.py index 16f085f6..ae9408ee 100644 --- a/nmrglue/process/proc_autophase.py +++ b/nmrglue/process/proc_autophase.py @@ -40,7 +40,7 @@ def autops(data, fn, p0=0.0, p1=0.0, return_phases=False, peak_width=100, **kwar Some of the more useful ones for this use case: * disp : Bool - Turns on or off the printing of convergence messeges + Turns on or off the printing of convergence messages By default, this is set to True. * ftol : float Absolute error in fn between iterations that is acceptable for diff --git a/nmrglue/process/proc_base.py b/nmrglue/process/proc_base.py index 77aee69a..89b607dd 100644 --- a/nmrglue/process/proc_base.py +++ b/nmrglue/process/proc_base.py @@ -459,7 +459,7 @@ def fsh(data, pts): def fsh2(data, pts): """ - Frequency Shift by Fourier transform. Postive signed phase correction. + Frequency Shift by Fourier transform. Positive signed phase correction. 
Parameters ---------- @@ -591,7 +591,7 @@ def fft(data): performed by the NMRPipe processing package and the functions :py:func:`fft_positive` and :py:func:`ifft_positive`. - All of the Fourier transforms perfromed by nmrglue return results in 'NMR + All of the Fourier transforms performed by nmrglue return results in 'NMR order', in which the two half of the spectrum have been swapped and reversed. @@ -816,7 +816,7 @@ def ha(data): Notes ----- This function is very slow. Implement a Fast Walsh-Hadamard Transform - with sequency/Walsh ordering (FWHT_w) will result in much faster tranforms. + with sequency/Walsh ordering (FWHT_w) will result in much faster transforms. http://en.wikipedia.org/wiki/Walsh_matrix http://en.wikipedia.org/wiki/Fast_Hadamard_transform @@ -824,7 +824,7 @@ def ha(data): """ # implementation is a proof of concept and EXTEMEMLY SLOW - # determind the order and final size of input vectors + # determine the order and final size of input vectors ord = int(np.ceil(np.log2(data.shape[-1]))) # Walsh/Hadamard order max = 2 ** ord @@ -832,7 +832,7 @@ def ha(data): pad = max - data.shape[-1] zdata = zf(data, pad) - # Multiple each vector by the hadamard matrix + # Multiply each vector by the hadamard matrix nat = np.zeros(zdata.shape, dtype=zdata.dtype) H = scipy.linalg.hadamard(max) nat = np.dot(zdata, H) @@ -1570,12 +1570,12 @@ def unpack_complex(data): def c2ri(data): """ - Seperate interleaved real, imaginary data into complex array. + Separate interleaved real, imaginary data into complex array. Assumes data is real only, ignores imaginary portion of data. """ - # make a 1,1 array to determind dtype + # make a 1,1 array to determine dtype temp = np.array(data.flat[0] + data.flat[1] * 1j) s = list(data.shape) s[-1] = int(s[-1] / 2) @@ -1588,7 +1588,7 @@ def c2ri(data): def seperate_interleaved(data): """ - Seperate interleaved real, imaginary data into complex array. + Separate interleaved real, imaginary data into complex array. """ return c2ri(data) @@ -1602,7 +1602,7 @@ def pack_complex(data): def decode_States(data): """ - Decode data collected using States (seperates interleaved data). + Decode data collected using States (separates interleaved data). """ return c2ri(data) @@ -1627,7 +1627,7 @@ def rr2ri(data): """ Unappend real and imaginary data returning a complex array. """ - # make a 1,1 array to determind dtype + # make a 1,1 array to determine dtype temp = np.array(data.flat[0] + data.flat[1] * 1.j) s = list(data.shape) half = int(s[-1] / 2.0) @@ -1780,7 +1780,7 @@ def neg_alt(data): def abs(data): """ - Replace data with absolute value of data (abs of real, imag seperately) + Replace data with absolute value of data (abs of real, imag separately) """ data.real = np.abs(data.real) data.imag = np.abs(data.imag) @@ -1789,7 +1789,7 @@ def abs(data): def sign(data): """ - Replace data with sign (-1 or 1) of data (seperately on each channel) + Replace data with sign (-1 or 1) of data (separately on each channel) """ data.real = np.sign(data.real) data.imag = np.sign(data.imag) @@ -1822,7 +1822,7 @@ def coadd(data, clist, axis=-1): # there is probably a efficient way to do this with tile and inner # or scipy.ndimage.generic_filter - # algorith creates a empty array, then fills it element wise + # algorithm creates a empty array, then fills it element wise # with each factor from clist times the blocks selected s = list(data.shape) # data shape @@ -1871,7 +1871,7 @@ def conv(data, kern=[1.], m="wrap", c=0.0): """ Convolute data with kernel.
- Real and imaginary components of data are convolved seperately. + Real and imaginary components of data are convolved separately. Parameters ---------- @@ -1903,7 +1903,7 @@ def corr(data, kern=[1.], m="wrap", c=0.0): """ Correlate data with a kernel (weights). - Real and imaginary components of data are correlated seperately. + Real and imaginary components of data are correlated separately. Parameters ---------- @@ -1937,7 +1937,7 @@ def filter_median(data, s=(1, 1), m="wrap", c=0.0): """ Apply a median filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -1965,7 +1965,7 @@ def filter_min(data, s=(1, 1), m="wrap", c=0.0): """ Apply a minimum filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -1993,7 +1993,7 @@ def filter_max(data, s=(1, 1), m="wrap", c=0.0): """ Apply a maximum filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -2021,7 +2021,7 @@ def filter_percentile(data, percentile, s=(1, 1), m="wrap", c=0.0): """ Apply a percentile filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -2053,7 +2053,7 @@ def filter_rank(data, rank, s=(1, 1), m="wrap", c=0.0): """ Apply a rank filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -2088,7 +2088,7 @@ def filter_amin(data, s=(1, 1), m="wrap", c=0.0): """ Apply an absolute minimum filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -2119,7 +2119,7 @@ def filter_amax(data, s=(1, 1), m="wrap", c=0.0): """ Apply an absolute maximum filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -2150,7 +2150,7 @@ def filter_range(data, s=(1, 1), m="wrap", c=0.0): """ Apply a range filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -2181,7 +2181,7 @@ def filter_avg(data, s=(1, 1), m="wrap", c=0.0): """ Apply an average filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -2212,7 +2212,7 @@ def filter_dev(data, s=(1, 1), m="wrap", c=0.0): """ Apply a standard deviation filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -2243,7 +2243,7 @@ def filter_sum(data, s=(1, 1), m="wrap", c=0.0): """ Apply a summation filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. Parameters ---------- @@ -2274,7 +2274,7 @@ def filter_generic(data, filter, s=(1, 1), m="wrap", c=0.0): """ Apply a generic filter. - Real and imaginary components are filtered seperately. + Real and imaginary components are filtered separately. 
Parameters ---------- diff --git a/nmrglue/process/proc_bl.py b/nmrglue/process/proc_bl.py index 6ce94392..ab6aff93 100644 --- a/nmrglue/process/proc_bl.py +++ b/nmrglue/process/proc_bl.py @@ -29,7 +29,7 @@ def base(data, nl, nw=0): Returns ------- ndata : ndarray - NMR data with first order baseline correction appied. For 2D data + NMR data with first order baseline correction applied. For 2D data baseline correction is applied for each trace along the last dimension. @@ -144,7 +144,7 @@ def med(data, mw=24, sf=16, sigma=5.0): """ Median baseline correction. - Algorith described in: Friedrichs, M.S. JBNMR 1995 5 147-153. + Algorithm described in: Friedrichs, M.S. JBNMR 1995 5 147-153. Parameters ---------- @@ -316,7 +316,7 @@ def sol_general(data, filter, w=16, mode='same'): which specific the filter, e.g. sol_boxcar. w : int, optional Filter length. Not used here but is used in solent filter functions - which specificy the filter, e.g. sol_boxcar. + which specify the filter, e.g. sol_boxcar. mode : {'valid', 'same', 'full'}, optional Convolution mode, 'same' should be used. @@ -395,7 +395,7 @@ def poly_fd(data): # # applies a polynomial baseline correction of the order specified by # argument -ord via an automated base-line detection method when used - # with argument -auto. The defauly is a forth-order polynomial. The + # with argument -auto. The default is a fourth-order polynomial. The # automated base-line mode works as follows: a copy of a given vector is # divided into a series of adjacent sections, typically eight points wide. # The average value of each section is subtracted from all points in that @@ -420,5 +420,5 @@ # 6. Classification threshold set to 1.5*std # 7. Qualify each block in centered vector as baseline only # (its std < thres) or not (std > thres) - # 8. Fit baseline only points to polynomial and substract off + # 8. Fit baseline only points to polynomial and subtract off raise NotImplementedError diff --git a/nmrglue/process/proc_lp.py b/nmrglue/process/proc_lp.py index 1ded9109..0c06219e 100644 --- a/nmrglue/process/proc_lp.py +++ b/nmrglue/process/proc_lp.py @@ -266,10 +266,10 @@ def lp_1d(trace, pred=1, slice=slice(None), order=8, mode="f", append="after", else: # form the LP equation matrix and vector D, d = make_Dd(x, order, mode) - a = find_lpc(D, d, method) # determind the LP prediction filter + a = find_lpc(D, d, method) # determine the LP prediction filter - # stablize roots if needed - if bad_roots is not None: # stablize roots if needed + # stabilize roots if needed + if bad_roots is not None: # stabilize roots if needed poles = find_roots(a, mode) # find roots (poles) poles = fix_roots(poles, bad_roots, fix_mode) # fix roots # reverse filter when calculated filter is in wrong direction @@ -314,7 +314,7 @@ def lp2d(data, pred, P, M, mirror='0', fix_points=True, method='svd'): Backward linear prediction using this method is not possible as the method depends on being able to mirror the data before the first collected point. In backwards mode this would correspond to being able to correctly - determind points after the last point which cannot be determinded using the + determine points after the last point which cannot be determined using the mirror method. A backward prediction matrix can be calculated but would not prove useful. @@ -340,7 +340,7 @@ def lp2d(data, pred, P, M, mirror='0', fix_points=True, method='svd'): data point. False leaved predicted points unaltered.
method : {'svd', 'qr', 'cholesky', 'tls'} Method used to calculate the LP prediction matrix. See :py:func:`lp` - for a description of theses methods. + for a description of these methods. Returns ------- @@ -411,7 +411,7 @@ def extrapolate_2d(x, C, pred, fix_points, mirror): (new[i + P - 1, j + M])) # fill the column with the mirrored column so it can be read in the - # next interation of the loop + # next iteration of the loop new[:, j + M] = make_mirror(new[plane:, j + M], mirror) return new[plane:] @@ -462,9 +462,9 @@ def make_lp2d_Dd(x, P, M, mode='f'): return D, d -############################################# -# Cadzow/Minumum variance signal enhacement # -############################################# +############################################## +# Cadzow/Minimum variance signal enhancement # +############################################## def cadzow(data, M, K, niter, min_var=False): @@ -472,10 +472,10 @@ def cadzow(data, M, K, niter, min_var=False): Perform a (row wise) Cadzow-like signal enhancement on 1D or 2D data. Performs a Cadzow-like signal enhancement with optional adjustment - of singular values using the minimum variance method as desribed in: + of singular values using the minimum variance method as described in: Chen, VanHuffel, Decanniere, VanHecke, JMR, 1994, 109A, 46-55. - For 2D data performs independant enhancement on each row of data array. + For 2D data performs independent enhancement on each row of data array. Parameters ---------- @@ -634,14 +634,14 @@ def lp_model(trace, slice=slice(None), order=8, mode="f", mirror=None, if mode == "b": poles = [1. / pole for pole in poles] - # determind the damping factor and frequencies from the roots + # determine the damping factor and frequencies from the roots damp = [root2damp(pole) for pole in poles] freq = [root2freq(pole) for pole in poles] if full is False: return damp, freq - # perform Least Squares fitting to determind amplitudes and phases. + # perform Least Squares fitting to determine amplitudes and phases. # We need to find a least squares solutions to: # z_0*b_0^0+z_1*b_1^0+.... = x_0 @@ -660,7 +660,7 @@ def lp_model(trace, slice=slice(None), order=8, mode="f", mirror=None, B = np.row_stack([poles ** (i) for i in range(len(x))]) z, resid, rank, s = np.linalg.lstsq(B, np.array(x)) - # Now the z_n = amp_n*exp(phase_n*i), use this to determind the amplitudes + # Now the z_n = amp_n*exp(phase_n*i), use this to determine the amplitudes # and phases amp = [cof2amp(cof) for cof in z] phase = [cof2phase(cof) for cof in z] @@ -707,7 +707,7 @@ def cof2phase(z): ############################## -# data preperation functions # +# data preparation functions # ############################## @@ -874,7 +874,7 @@ def find_lpc_tls(D, d): def find_lpc_fb(x, order, bad_roots, fix_mode, method): """ - Determind LP coefficient using forward-backward linear prediction. + Determine LP coefficient using forward-backward linear prediction. Averages LP coefficients generated from solving the forward and backward linear prediction equations after reversing the roots of characteristic @@ -915,7 +915,7 @@ def find_lpc_fb(x, order, bad_roots, fix_mode, method): def find_lpc_bf(x, order, bad_roots, fix_mode, method): """ - Determind LP coefficient using backward-forward linear prediction. + Determine LP coefficient using backward-forward linear prediction. 
Averages LP coefficients generated from solving the forward and backward linear prediction equations after reversing the roots of characteristic @@ -963,7 +963,7 @@ def find_lproots_hsvd(x, M, K, mode, zmethod='sm'): """ Find LP roots (poles) using the HSVD method - Perform a HSVD linear prediction to determind signal roots (poles) as + Perform a HSVD linear prediction to determine signal roots (poles) as described in: Barkhuijsen, DeBeer, and Van Ormondt, JMR, 1987, 73, 553 @@ -1019,7 +1019,7 @@ def find_lproots_hsvd(x, M, K, mode, zmethod='sm'): # SVD of data matrix and truncation of U to form Uk U, s, Vh = scipy.linalg.svd(X) - Uk = np.mat(U[:, :K]) # trucated U matrix of rank K + Uk = np.mat(U[:, :K]) # truncated U matrix of rank K Ub = Uk[:-1] # Uk with bottom row removed Ut = Uk[1:] # Uk with top row removed diff --git a/nmrglue/util/xcpy.py b/nmrglue/util/xcpy.py index f3758592..d99dd20e 100644 --- a/nmrglue/util/xcpy.py +++ b/nmrglue/util/xcpy.py @@ -62,7 +62,7 @@ from ConfigParser import SafeConfigParser except ModuleNotFoundError: # this will only fail if Python 3 is used - # but that error willl be handled by the + # but that error will be handled by the # check_jython() function pass @@ -315,7 +315,7 @@ def verify_python(command): errmsg = """ {} does not seem to be a valid python file. Please check the configuration file using 'xcpy --config', - or change the congiguration file using 'xcpy --settings' + or change the configuration file using 'xcpy --settings' This attempt will be aborted. """.format( command diff --git a/setup.cfg b/setup.cfg index 3c6e79cf..185f3159 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,5 @@ [bdist_wheel] universal=1 + +[codespell] +ignore-words-list = nd,ro,ser,te,tthe,varian diff --git a/tests/pipe_proc_tests/tm.py b/tests/pipe_proc_tests/tm.py index 6a40b2c3..13dcca92 100755 --- a/tests/pipe_proc_tests/tm.py +++ b/tests/pipe_proc_tests/tm.py @@ -1,7 +1,7 @@ #! /usr/bin/env python """ Create files for tm unit test """ -# ignore RuntimeWarnings created by division by zero when appling apodization +# ignore RuntimeWarnings created by division by zero when applying apodization import warnings warnings.simplefilter('ignore', RuntimeWarning) diff --git a/tests/test_pipe_proc.py b/tests/test_pipe_proc.py index dddd16d3..585db7b9 100644 --- a/tests/test_pipe_proc.py +++ b/tests/test_pipe_proc.py @@ -208,7 +208,7 @@ def test_zf(): def test_add(): """ ADD function """ - return _standard_test('add', 4) # Note that test 5 fails intensionally + return _standard_test('add', 4) # Note that test 5 fails intentionally def test_dx(): @@ -238,7 +238,7 @@ def test_mir(): def test_mult(): """ MULT function """ - return _standard_test('mult', 3) # Note that test 4 fails intensionally + return _standard_test('mult', 3) # Note that test 4 fails intentionally def test_rev(): @@ -359,7 +359,7 @@ def test_known_fail(): 'shuf8.glue', 'shuf9.glue', 'shuf10.glue'] pipe_script = 'known_fail.com' glue_script = 'known_fail.py' - # also 'shuf11', 'shuf12' and dev1' test all fail intensionally. + # also 'shuf11', 'shuf12' and 'dev1' tests all fail intentionally. return _perform_test(glue_script, pipe_script, glue_files, pipe_files) """
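A note on the new [codespell] section added to setup.cfg above: recent codespell releases read this section automatically when run from the repository root, so the ignore list travels with the project and the check that produced these fixes can be repeated locally. A minimal sketch (the directory arguments are illustrative):

    import subprocess

    # codespell picks up ignore-words-list from setup.cfg; a nonzero
    # exit status means misspellings were found
    result = subprocess.run(["codespell", "nmrglue", "doc", "examples", "tests"])
    print("clean" if result.returncode == 0 else "misspellings found")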