Skip to content

Commit

Permalink
added test for fft and edited previous tests
Browse files Browse the repository at this point in the history
Add a test of the new fft methods that compares them to the previous way of doing it, and that does the same thing in different ways with the new methods, to make sure they all give the same answer.

Also edited test_file_selection_methods so that it loads from a local directory, not a Google bucket, just to speed things up a bit.

All tests run in 30s on my local machine.
  • Loading branch information
jkingslake committed Oct 10, 2024
1 parent 4825f7f commit 3cf08fb
Show file tree
Hide file tree
Showing 3 changed files with 205 additions and 50 deletions.
64 changes: 60 additions & 4 deletions notebooks/test_notes/custom_fft_implemention_testing.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
},
{
"cell_type": "code",
"execution_count": 216,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
Expand All @@ -28,16 +28,16 @@
},
{
"cell_type": "code",
"execution_count": 217,
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[<matplotlib.lines.Line2D at 0x3266e8e60>]"
"[<matplotlib.lines.Line2D at 0x175fd8050>]"
]
},
"execution_count": 217,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
},
Expand All @@ -57,6 +57,62 @@
"new.dB().plot(xlim = (0, 10))"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"('time', 'attenuator_setting_pair', 'profile_range')"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"new.dims"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"np.allclose(old.transpose(*new.dims).values, new.values)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 250,
"metadata": {},
"outputs": [],
"source": [
"np.allclose(new.values, old.values)"
]
},
{
"cell_type": "code",
"execution_count": 244,
Expand Down
122 changes: 91 additions & 31 deletions notebooks/test_notes/tests_for_github.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,8 @@
"sys.path.append(\"../../../xapres/\")\n",
"import xapres as xa\n",
"import numpy\n",
"from xapres import load, utils\n"
"from xapres import load, utils\n",
"from numpy import allclose as npc\n"
]
},
{
Expand Down Expand Up @@ -194,7 +195,15 @@
},
{
"cell_type": "code",
"execution_count": 39,
"execution_count": 84,
"id": "091b6e6d",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 85,
"id": "076ae5d4-5471-4945-b2e5-d1932d46b870",
"metadata": {},
"outputs": [],
Expand All @@ -203,23 +212,19 @@
"# In each case load it and then check that we have loaded the correct file. \n",
"\n",
"def test_file_selection_methods():\n",
" directory='gs://ldeo-glaciology/GL_apres_2022/A101'\n",
" fs1 = load.from_dats(max_range=1400)\n",
" fs1.load_all(directory, \n",
" remote_load = True,\n",
" file_numbers_to_process=[0,1])\n",
" directory='../../data/sample/polarmetric'\n",
" fs1 = load.from_dats()\n",
" fs1.load_all(directory, legacy_fft=False, file_numbers_to_process=[0,1])\n",
"\n",
" fs2 = load.from_dats(max_range=1400)\n",
" fs2.load_all(directory, \n",
" remote_load = True,\n",
" file_names_to_process = fs1.dat_filenames_to_process)\n",
" fs2 = load.from_dats()\n",
" fs2.load_all(directory, legacy_fft=False, file_names_to_process = fs1.dat_filenames_to_process)\n",
"\n",
" assert fs1.data.equals(fs2.data)"
]
},
{
"cell_type": "code",
"execution_count": 40,
"execution_count": 86,
"id": "ced97345-a02e-4bbb-a428-1bcb9eb74d84",
"metadata": {},
"outputs": [],
Expand Down Expand Up @@ -308,39 +313,98 @@
},
{
"cell_type": "code",
"execution_count": 29,
"execution_count": 21,
"id": "9a390ba7-5893-4fd7-9414-d1107f2acd73",
"metadata": {},
"outputs": [],
"source": [
"# test the displacement calculation\n",
"\n",
"def test_displacement_calculation():\n",
" from_zarr = load.load_zarr() # lazily load a large APRES dataset from Greenland\n",
" p1 = from_zarr.isel(time=2000).profile_stacked # select a profile \n",
" p2 = from_zarr.isel(time=2100).profile_stacked # select a different profile \n",
" \n",
" utils.compute_displacement(p1, p2) # calculate the displacement between the two profiles\n",
" fd = xa.load.from_dats()\n",
"\n",
" t = from_zarr.sel(time='2022-07-17').profile_stacked # select all the profiles on a specfic date\n",
" results = t.displacement_timeseries(bin_size = 30, offset = 3) # compute a time series of displacement from these data. Use non-default values for offset and bin_size \n",
" from_local = fd.load_all(directory='../../data/sample/multi-burst-dat-file/', legacy_fft=False) # load the data from a local directory\n",
" p1 = from_local.isel(time=2, attenuator_setting_pair=0).profile # select a profile \n",
" p2 = from_local.isel(time=5, attenuator_setting_pair=0).profile # select a different profile \n",
"\n",
" assert (abs(results)>0).all().load()"
" utils.compute_displacement(p1, p2, bin_size = 25) # calculate the displacement between the two profiles. Use a non-default value for bin_size\n",
"\n",
"\n",
" profiles = from_local.isel(attenuator_setting_pair=0).profile # select all the profiles on a specfic date\n",
" results = profiles.displacement_timeseries(bin_size = 30, offset = 3) # compute a time series of displacement from these data. Use non-default values for offset and bin_size. \n",
" assert results"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "44e3e329",
"execution_count": 22,
"id": "524b2b24",
"metadata": {},
"outputs": [],
"source": [
"out = test_displacement_calculation()"
"test_displacement_calculation()"
]
},
{
"cell_type": "code",
"execution_count": 42,
"execution_count": 73,
"id": "5bfeb951",
"metadata": {},
"outputs": [],
"source": [
"def test_fft_calculations():\n",
" # initialize\n",
" fd = xa.load.from_dats()\n",
" directory='../../data/sample/single_dat_file/'\n",
" \n",
" # load the data from dat files\n",
" ## load data from a local directory, compute the fft with the legacy method and dont correct the padding error\n",
" load_oldfft_uncorrectedPad = fd.load_all(directory, legacy_fft=True, corrected_pad=False).profile\n",
" ## load data from a local directory, compute the fft with the legacy method, but this time correct the padding error\n",
" load_oldfft_correctedPad = fd.load_all(directory, legacy_fft=True, corrected_pad=True).profile\n",
"\n",
" ## load from a local directory, compute the fft with the new method\n",
" load_newfft_full = fd.load_all(directory, legacy_fft=False)\n",
" load_newfft = load_newfft_full.profile\n",
" ## load from a local directory, compute the fft with the new method while setting the crop-limits on the chirp to be their default values (this shouldnt effect the answer from the line above)\n",
" load_newfft_defaultLimits = fd.load_all(directory, legacy_fft=False, addProfileToDs_kwargs={'crop_chirp_start': 0,'crop_chirp_end': 1}).profile\n",
" ## load from a local directory, compute the fft with the new method while setting the crop-limits on the chirp to some other values (this will effect the answer)\n",
" load_newfft_nonDefaultLimits = fd.load_all(directory, legacy_fft=False, addProfileToDs_kwargs={'crop_chirp_start': 0,'crop_chirp_end': 0.5}).profile\n",
"\n",
" # Compute the ffts on pre-loaded data\n",
" ## use the method .addProfileToDs() to compute the fft on a pre-loaded dataset\n",
" afterLoad_newfft_ds = load_newfft_full.addProfileToDs()\n",
" ## use the method .computeProfile() to compute the fft on a pre-loaded chirp dataarray\n",
" afterLoad_newfft_da = load_newfft_full.chirp.computeProfile()\n",
"\n",
" # Change a constant used in the calculation of the range, this dosesnt effect the profiles, just the profile_range\n",
" constants = load_newfft_full.attrs['constants']\n",
" constants['c'] = 2e8\n",
" afterLoad_newfft_da_differentConstants = load_newfft_full.chirp.computeProfile(constants=constants)\n",
"\n",
" assert not npc(load_oldfft_uncorrectedPad.values, load_oldfft_correctedPad.values)\n",
" d = load_newfft.dims #needed to transpose the dataarrays that use legacy_fft=True to be the same as those which use legacy_fft=False\n",
" assert npc(load_oldfft_correctedPad.transpose(*d).values, load_newfft.values)\n",
" assert npc(load_oldfft_correctedPad.transpose(*d).values, load_newfft_defaultLimits.values)\n",
" assert npc(afterLoad_newfft_ds.profile.values, load_newfft_full.profile.values)\n",
" assert npc(afterLoad_newfft_da.values, load_newfft_full.profile.values)\n",
" assert npc(afterLoad_newfft_da_differentConstants.values, load_newfft_full.profile.values)\n",
" assert not npc(afterLoad_newfft_da_differentConstants.profile_range.values, load_newfft_full.profile_range.values)"
]
},
{
"cell_type": "code",
"execution_count": 74,
"id": "1a8401a0",
"metadata": {},
"outputs": [],
"source": [
"test_fft_calculations()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "5c91b524",
"metadata": {},
"outputs": [],
Expand All @@ -355,16 +419,12 @@
},
{
"cell_type": "code",
"execution_count": 53,
"execution_count": 24,
"id": "7de01d8e",
"metadata": {},
"outputs": [],
"source": [
"a = xr.DataArray([1, 2, 3], dims=\"X\")\n",
"\n",
"assert xr.DataArray.dB\n",
"assert xr.DataArray.sonify\n",
"assert xr.DataArray.displacement_timeseries"
"test_bound_methods_are_added_correctly()"
]
},
{
Expand Down
69 changes: 54 additions & 15 deletions tests/test_all.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,16 +24,18 @@ def test_dat_file_loading():

# test the displacement calculation
def test_displacement_calculation():
from_zarr = load.load_zarr() # lazily load a large APRES dataset from Greenland
p1 = from_zarr.isel(time=2000).profile_stacked # select a profile
p2 = from_zarr.isel(time=2100).profile_stacked # select a different profile

utils.compute_displacement(p1, p2) # calculate the displacement between the two profiles
fd = load.from_dats()

t = from_zarr.sel(time='2022-07-17').profile_stacked # select all the profiles on a specfic date
results = t.displacement_timeseries(bin_size = 30, offset = 3) # compute a time series of displacement from these data. Use non-default values for offset and bin_size
from_local = fd.load_all(directory='data/sample/multi-burst-dat-file/', legacy_fft=False) # load the data from a local directory
p1 = from_local.isel(time=2, attenuator_setting_pair=0).profile # select a profile
p2 = from_local.isel(time=5, attenuator_setting_pair=0).profile # select a different profile

assert (abs(results)>0).all().load()
utils.compute_displacement(p1, p2, bin_size = 25) # calculate the displacement between the two profiles. Use a non-default value for bin_size


profiles = from_local.isel(attenuator_setting_pair=0).profile # select all the profiles on a specfic date
results = profiles.displacement_timeseries(bin_size = 30, offset = 3) # compute a time series of displacement from these data. Use non-default values for offset and bin_size.
assert results



Expand Down Expand Up @@ -62,16 +64,12 @@ def test_file_search_methods():
# In each case load it and then check that we have loaded the correct file.

def test_file_selection_methods():
directory='gs://ldeo-glaciology/GL_apres_2022/A101'
directory='data/sample/polarmetric'
fs1 = load.from_dats()
fs1.load_all(directory,
remote_load = True,
file_numbers_to_process=[0,1])
fs1.load_all(directory, legacy_fft=False, file_numbers_to_process=[0,1])

fs2 = load.from_dats()
fs2.load_all(directory,
remote_load = True,
file_names_to_process = fs1.dat_filenames_to_process)
fs2.load_all(directory, legacy_fft=False, file_names_to_process = fs1.dat_filenames_to_process)

assert fs1.data.equals(fs2.data)

Expand Down Expand Up @@ -101,3 +99,44 @@ def test_wrappers():
)

from_zarr = load.load_zarr()

from numpy import allclose as npc

def test_fft_calculations():
    """Check that the new FFT methods agree with the legacy implementation.

    Loads the same sample dat file several ways (legacy FFT with and without
    the padding correction, new FFT at load time, new FFT computed after
    loading) and asserts that all routes which should agree do agree, and
    those which should differ do differ.
    """
    # initialize
    fd = load.from_dats()
    directory = 'data/sample/single_dat_file/'

    # load the data from dat files
    ## legacy FFT, without correcting the padding error
    load_oldfft_uncorrectedPad = fd.load_all(directory, legacy_fft=True, corrected_pad=False).profile
    ## legacy FFT, this time correcting the padding error
    load_oldfft_correctedPad = fd.load_all(directory, legacy_fft=True, corrected_pad=True).profile

    ## new FFT method
    load_newfft_full = fd.load_all(directory, legacy_fft=False)
    load_newfft = load_newfft_full.profile
    ## new FFT with the chirp crop limits explicitly set to their default values
    ## (this shouldn't affect the answer from the line above)
    load_newfft_defaultLimits = fd.load_all(directory, legacy_fft=False, addProfileToDs_kwargs={'crop_chirp_start': 0, 'crop_chirp_end': 1}).profile
    ## new FFT with non-default crop limits (this WILL affect the answer).
    ## NOTE(review): computed only to check it runs without error; no assertion
    ## below uses it yet — consider asserting it differs from load_newfft.
    load_newfft_nonDefaultLimits = fd.load_all(directory, legacy_fft=False, addProfileToDs_kwargs={'crop_chirp_start': 0, 'crop_chirp_end': 0.5}).profile

    # Compute the ffts on pre-loaded data
    ## use the method .addProfileToDs() to compute the fft on a pre-loaded dataset
    afterLoad_newfft_ds = load_newfft_full.addProfileToDs()
    ## use the method .computeProfile() to compute the fft on a pre-loaded chirp dataarray
    afterLoad_newfft_da = load_newfft_full.chirp.computeProfile()

    # Change a constant used in the calculation of the range; this doesn't
    # affect the profiles, just the profile_range.
    # Copy the dict first so we don't mutate the dataset's attrs in place
    # (the original aliased load_newfft_full.attrs['constants'] directly, so
    # the assignment below silently edited the loaded dataset's attributes).
    constants = dict(load_newfft_full.attrs['constants'])
    constants['c'] = 2e8
    afterLoad_newfft_da_differentConstants = load_newfft_full.chirp.computeProfile(constants=constants)

    # the padding correction must change the result
    assert not npc(load_oldfft_uncorrectedPad.values, load_oldfft_correctedPad.values)
    # transpose the legacy_fft=True arrays so their dimension order matches
    # the arrays produced with legacy_fft=False
    d = load_newfft.dims
    assert npc(load_oldfft_correctedPad.transpose(*d).values, load_newfft.values)
    assert npc(load_oldfft_correctedPad.transpose(*d).values, load_newfft_defaultLimits.values)
    assert npc(afterLoad_newfft_ds.profile.values, load_newfft_full.profile.values)
    assert npc(afterLoad_newfft_da.values, load_newfft_full.profile.values)
    # changing the constant leaves the profiles unchanged ...
    assert npc(afterLoad_newfft_da_differentConstants.values, load_newfft_full.profile.values)
    # ... but changes the computed range
    assert not npc(afterLoad_newfft_da_differentConstants.profile_range.values, load_newfft_full.profile_range.values)

0 comments on commit 3cf08fb

Please sign in to comment.