
Commit 42e6b26

bugfix for pgm reading
dchaddock committed Feb 5, 2024
1 parent c716b3e commit 42e6b26
Showing 6 changed files with 48 additions and 27 deletions.
2 changes: 1 addition & 1 deletion python/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "trex-imager-readfile"
-version = "1.6.0"
+version = "1.6.1"
 description = "Read functions for TREx ASI raw image files"
 readme = "README.md"
 homepage = "https://github.com/ucalgary-aurora/trex-imager-readfile"
2 changes: 1 addition & 1 deletion python/tests/test_suite/test_version.py
@@ -2,4 +2,4 @@
 
 
 def test_version():
-    assert __version__ == "1.6.0"
+    assert __version__ == "1.6.1"
2 changes: 1 addition & 1 deletion python/trex_imager_readfile/__init__.py
@@ -1,4 +1,4 @@
__version__ = "1.6.0"
__version__ = "1.6.1"

# core functions for easy use
from .blueline import read as read_blueline
23 changes: 15 additions & 8 deletions python/trex_imager_readfile/blueline.py
@@ -174,12 +174,6 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
     :return: images, metadata dictionaries, and problematic files
     :rtype: numpy.ndarray, list[dict], list[dict]
     """
-    # pre-allocate array sizes (optimization)
-    predicted_num_frames = len(file_list) * 20
-    images = np.empty([270, 320, predicted_num_frames], dtype=__BLUELINE_DT)
-    metadata_dict_list = [{}] * predicted_num_frames
-    problematic_file_list = []
-
     # if input is just a single file name in a string, convert to a list to be fed to the workers
     if isinstance(file_list, str):
         file_list = [file_list]
@@ -223,7 +217,19 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
             quiet=quiet,
         ))
 
-    # reorganize data
+    # derive number of frames to prepare for
+    total_num_frames = 0
+    for i in range(0, len(data)):
+        if (data[i][2] is True):
+            continue
+        total_num_frames += data[i][0].shape[2]
+
+    # pre-allocate array sizes
+    images = np.empty([270, 320, total_num_frames], dtype=__BLUELINE_DT)
+    metadata_dict_list = [{}] * total_num_frames
+    problematic_file_list = []
+
+    # populate data
     list_position = 0
     for i in range(0, len(data)):
         # check if file was problematic
@@ -232,6 +238,7 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
                 "filename": data[i][3],
                 "error_message": data[i][4],
             })
+            continue
 
         # check if any data was read in
         if (len(data[i][1]) == 0):
@@ -248,7 +255,7 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
 
     # trim unused elements from predicted array sizes
     metadata_dict_list = metadata_dict_list[0:list_position]
-    images = np.delete(images, range(list_position, predicted_num_frames), axis=2)
+    images = np.delete(images, range(list_position, total_num_frames), axis=2)
 
     # ensure entire array views as uint16
     images = images.astype(np.uint16)
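
The change above (repeated for nir.py and spectrograph.py below) derives the allocation size from what the workers actually returned instead of predicting 20 frames per file. A minimal standalone sketch of that pattern follows; it is not the package's own code, and the worker result tuple layout, the 270x320 frame size, and the uint16 dtype are assumptions carried over from the diff:

import numpy as np

def stack_frames(worker_results):
    # each result is assumed to be a tuple of
    # (frames, metadata_list, problematic, filename, error_message),
    # with frames shaped [rows, cols, n_frames], mirroring the indexing in the diff
    total_num_frames = sum(
        r[0].shape[2] for r in worker_results if r[2] is not True
    )

    # pre-allocate from the exact count rather than a per-file prediction
    images = np.empty([270, 320, total_num_frames], dtype=np.uint16)
    metadata_dict_list = [{}] * total_num_frames
    problematic_file_list = []

    # populate the arrays, skipping problematic files
    list_position = 0
    for frames, metadata, problematic, filename, error_message in worker_results:
        if problematic is True:
            problematic_file_list.append({
                "filename": filename,
                "error_message": error_message,
            })
            continue
        n_frames = frames.shape[2]
        images[:, :, list_position:list_position + n_frames] = frames
        metadata_dict_list[list_position:list_position + n_frames] = metadata
        list_position += n_frames

    return images, metadata_dict_list, problematic_file_list

Counting after the workers return costs one extra pass over their results, but the pre-allocated arrays can no longer be smaller than the data that has to go into them.
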
23 changes: 15 additions & 8 deletions python/trex_imager_readfile/nir.py
@@ -174,12 +174,6 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
     :return: images, metadata dictionaries, and problematic files
     :rtype: numpy.ndarray, list[dict], list[dict]
     """
-    # pre-allocate array sizes (optimization)
-    predicted_num_frames = len(file_list) * 10
-    images = np.empty([256, 256, predicted_num_frames], dtype=__NIR_DT)
-    metadata_dict_list = [{}] * predicted_num_frames
-    problematic_file_list = []
-
     # if input is just a single file name in a string, convert to a list to be fed to the workers
     if isinstance(file_list, str):
         file_list = [file_list]
@@ -223,7 +217,19 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
             quiet=quiet,
         ))
 
-    # reorganize data
+    # derive number of frames to prepare for
+    total_num_frames = 0
+    for i in range(0, len(data)):
+        if (data[i][2] is True):
+            continue
+        total_num_frames += data[i][0].shape[2]
+
+    # pre-allocate array sizes
+    images = np.empty([256, 256, total_num_frames], dtype=__NIR_DT)
+    metadata_dict_list = [{}] * total_num_frames
+    problematic_file_list = []
+
+    # populate data
     list_position = 0
     for i in range(0, len(data)):
         # check if file was problematic
@@ -232,6 +238,7 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
                 "filename": data[i][3],
                 "error_message": data[i][4],
            })
+            continue
 
         # check if any data was read in
         if (len(data[i][1]) == 0):
@@ -248,7 +255,7 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
 
     # trim unused elements from predicted array sizes
    metadata_dict_list = metadata_dict_list[0:list_position]
-    images = np.delete(images, range(list_position, predicted_num_frames), axis=2)
+    images = np.delete(images, range(list_position, total_num_frames), axis=2)
 
     # ensure entire array views as uint16
     images = images.astype(np.uint16)
23 changes: 15 additions & 8 deletions python/trex_imager_readfile/spectrograph.py
@@ -174,12 +174,6 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
     :return: images, metadata dictionaries, and problematic files
     :rtype: numpy.ndarray, list[dict], list[dict]
     """
-    # pre-allocate array sizes (optimization)
-    predicted_num_frames = len(file_list) * 4
-    images = np.empty([1024, 256, predicted_num_frames], dtype=__SPECTROGRAPH_DT)
-    metadata_dict_list = [{}] * predicted_num_frames
-    problematic_file_list = []
-
     # if input is just a single file name in a string, convert to a list to be fed to the workers
     if isinstance(file_list, str):
         file_list = [file_list]
@@ -223,7 +217,19 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
             quiet=quiet,
         ))
 
-    # reorganize data
+    # derive number of frames to prepare for
+    total_num_frames = 0
+    for i in range(0, len(data)):
+        if (data[i][2] is True):
+            continue
+        total_num_frames += data[i][0].shape[2]
+
+    # pre-allocate array sizes
+    images = np.empty([1024, 256, total_num_frames], dtype=__SPECTROGRAPH_DT)
+    metadata_dict_list = [{}] * total_num_frames
+    problematic_file_list = []
+
+    # populate data
     list_position = 0
     for i in range(0, len(data)):
         # check if file was problematic
@@ -232,6 +238,7 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
                 "filename": data[i][3],
                 "error_message": data[i][4],
             })
+            continue
 
         # check if any data was read in
         if (len(data[i][1]) == 0):
@@ -248,7 +255,7 @@ def read(file_list, workers=1, first_frame=False, no_metadata=False, quiet=False
 
     # trim unused elements from predicted array sizes
     metadata_dict_list = metadata_dict_list[0:list_position]
-    images = np.delete(images, range(list_position, predicted_num_frames), axis=2)
+    images = np.delete(images, range(list_position, total_num_frames), axis=2)
 
     # ensure entire array views as uint16
     images = images.astype(np.uint16)
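
For anyone updating to 1.6.1, the public read functions keep the signature and return values shown in the docstrings above. A short usage sketch; the data path and glob pattern are hypothetical:

import glob

import trex_imager_readfile

# hypothetical location of an hour of Blueline PGM files
file_list = glob.glob("/data/trex/blueline/2024/02/05/06/*.pgm*")

# images come back as a [rows, cols, frames] uint16 array, with one metadata
# dict per frame and a list describing any files that could not be read
images, metadata, problematic_files = trex_imager_readfile.read_blueline(file_list, workers=4)

print(images.shape, len(metadata), len(problematic_files))
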
