
Commit

update-emd-reader
gduscher committed Feb 24, 2024
1 parent 64b8908 commit b2f5e90
Showing 6 changed files with 61 additions and 31 deletions.
4 changes: 2 additions & 2 deletions SciFiReaders/__version__.py
@@ -1,2 +1,2 @@
-version = '0.11.0'
-time = '2023-11-08 17:00:00'
+version = '0.11.4'
+time = '2024-01-21 17:00:00'
3 changes: 1 addition & 2 deletions SciFiReaders/readers/microscopy/em/tem/dm_reader.py
@@ -196,7 +196,6 @@ def read(self):
        start=1
        channel_number = 0
        for image_number in self.__stored_tags['ImageList'].keys():
-            print(image_number, start)
            if int(image_number) >= start:
                dataset = self.get_dataset(self.__stored_tags['ImageList'][image_number])
                if isinstance(dataset, sidpy.Dataset):
@@ -231,8 +230,8 @@ def read(self):

        del self.__stored_tags['ImageList']
        main_dataset_key = list(self.datasets.keys())[0]
+
        for key, dataset in self.datasets.items():
-            print(key, dataset)
            if 'urvey' in dataset.title:
                main_dataset_key = key
        if self.verbose:
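Note on the surviving logic above: dm_reader picks its main dataset by a bare substring test, and checking for 'urvey' in the title matches both 'Survey' and 'survey' regardless of capitalization. A minimal standalone sketch of that selection, using hypothetical channel titles rather than data from a real file:

    # Hypothetical titles standing in for the dataset.title values in self.datasets
    datasets = {'Channel_000': 'HAADF Image',
                'Channel_001': 'Survey Image',
                'Channel_002': 'EELS Spectrum'}

    main_dataset_key = list(datasets.keys())[0]   # default: first channel
    for key, title in datasets.items():
        if 'urvey' in title:                      # matches 'Survey' as well as 'survey'
            main_dataset_key = key                # last matching channel wins
    print(main_dataset_key)                       # -> 'Channel_001'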
1 change: 1 addition & 0 deletions SciFiReaders/readers/microscopy/em/tem/edax_reader.py
@@ -50,6 +50,7 @@ def reader(h5_file):
    datasets = {}
    # read spectum images first
    for dataset_item in all_datasets_list:
+        print(dataset_item)
        if 'Live Map' in dataset_item:
            if 'SPD' in dataset_item:
                dataset = read_spectrum_image(base_group, dataset_item)
72 changes: 50 additions & 22 deletions SciFiReaders/readers/microscopy/em/tem/emd_reader.py
@@ -52,7 +52,7 @@ class EMDReader(sidpy.Reader):
    datasets: dict
        dictionary of sidpy.Datasets
    """
-    def __init__(self, file_path):
+    def __init__(self, file_path, sum_frames=False, no_eds=False):
        super(EMDReader, self).__init__(file_path)

        # Let h5py raise an OS error if a non-HDF5 file was provided
@@ -64,9 +64,12 @@ def __init__(self, file_path):
        self.data_array = None
        self.metadata = None
        self.label_dict = {}
+        self.no_eds = no_eds
+        self.sum_frames = sum_frames

        self.number_of_frames = 1
+

    def can_read(self):
        """
        Checks whether or not this Reader can read the provided file
@@ -249,28 +252,52 @@ def get_image(self):
        key = f"Channel_{int(self.channel_number):03d}"
        self.key = key
        self.channel_number += 1

-        scale_x = float(self.metadata['BinaryResult']['PixelSize']['width']) * 1e9
-        scale_y = float(self.metadata['BinaryResult']['PixelSize']['height']) * 1e9
+        if self.metadata['BinaryResult']['PixelUnitX'] == '1/m':
+            units = '1/nm'
+            quantity = 'reciprocal distance'
+            dimension_type='reciprocal'
+            to_nm = 1e-9
+        else:
+            units = 'nm'
+            quantity = 'distance'
+            dimension_type='spatial'
+            to_nm = 1e9
+
+        scale_x = float(self.metadata['BinaryResult']['PixelSize']['width']) * to_nm
+        scale_y = float(self.metadata['BinaryResult']['PixelSize']['height']) * to_nm
+        offset_x = float(self.metadata['BinaryResult']['Offset']['x']) * to_nm
+        offset_y = float(self.metadata['BinaryResult']['Offset']['y']) * to_nm
+
+        if self.sum_frames:
+            data_array = np.zeros([self.data_array.shape[0], self.data_array.shape[1], 1])
+            for i in range(self.data_array.shape[2]):
+                data_array[:, :, 0] += self.data_array[:, :, i]
+            self.data_array = data_array

        if self.data_array.shape[2] == 1:
            self.datasets[key] = sidpy.Dataset.from_array(self.data_array[:, :, 0])
            self.datasets[key].data_type = 'image'
-            self.datasets[key].set_dimension(0, sidpy.Dimension(np.arange(self.data_array.shape[0]) * scale_x,
-                                                                name='x', units='nm',
-                                                                quantity='distance',
-                                                                dimension_type='spatial'))
-            self.datasets[key].set_dimension(1, sidpy.Dimension(np.arange(self.data_array.shape[1]) * scale_y,
-                                                                name='y', units='nm',
-                                                                quantity='distance',
+            self.datasets[key].set_dimension(0, sidpy.Dimension(np.arange(self.data_array.shape[0]) * scale_x + offset_x,
+                                                                name='x', units=units,
+                                                                quantity=quantity,
+                                                                dimension_type=dimension_type))
+            self.datasets[key].set_dimension(1, sidpy.Dimension(np.arange(self.data_array.shape[1]) * scale_y + offset_y,
+                                                                name='y', units=units,
+                                                                quantity=quantity,
                                                                 dimension_type='spatial'))
        else:
            # There is a problem with random access of data due to chunking in hdf5 files
            # Speed-up copied from hyperspy.ioplugins.EMDReader.FEIEMDReader

-            data_array = np.empty(self.data_array.shape)
-            self.data_array.read_direct(data_array)
-            self.data_array = np.rollaxis(data_array, axis=2)
+            if self.sum_frames:
+                data_array = np.zeros(self.data_array.shape[0:2])
+                self.data_array.read_direct(data_array)
+                self.data_array = np.rollaxis(data_array, axis=2)
+                self.data_array = self.data_array.sum(axis=2)
+            else:
+                data_array = np.empty(self.data_array.shape)
+                self.data_array.read_direct(data_array)
+                self.data_array = np.rollaxis(data_array, axis=2)
            # np.moveaxis(data_array, source=[0, 1, 2], destination=[2, 0, 1])

        self.datasets[key] = sidpy.Dataset.from_array(self.data_array)
@@ -279,20 +306,21 @@ def get_image(self):
                                                                name='frame', units='frame',
                                                                quantity='time',
                                                                dimension_type='temporal'))
-            self.datasets[key].set_dimension(1, sidpy.Dimension(np.arange(self.data_array.shape[1]) * scale_x,
-                                                                name='x', units='nm',
-                                                                quantity='distance',
-                                                                dimension_type='spatial'))
-            self.datasets[key].set_dimension(2, sidpy.Dimension(np.arange(self.data_array.shape[2]) * scale_y,
-                                                                name='y', units='nm',
-                                                                quantity='distance',
+            self.datasets[key].set_dimension(1, sidpy.Dimension(np.arange(self.data_array.shape[0]) * scale_x + offset_x,
+                                                                name='x', units=units,
+                                                                quantity=quantity,
+                                                                dimension_type=dimension_type))
+            self.datasets[key].set_dimension(2, sidpy.Dimension(np.arange(self.data_array.shape[1]) * scale_y + offset_y,
+                                                                name='y', units=units,
+                                                                quantity=quantity,
                                                                 dimension_type='spatial'))
        self.datasets[key].original_metadata = self.metadata

        self.datasets[key].units = 'counts'
        self.datasets[key].quantity = 'intensity'
        if self.image_key in self.label_dict:
            self.datasets[key].title = self.label_dict[self.image_key]
+        self.data_array=np.zeros([1,1])

    def extract_crucial_metadata(self, key):
        metadata = self.datasets[key].original_metadata
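The changes above give EMDReader two new keyword arguments: sum_frames, which collapses an image stack into a single summed frame, and no_eds, presumably for skipping EDS data (its use is not shown in the hunks displayed here). get_image also now takes units, quantities and offsets from the BinaryResult metadata, producing reciprocal dimensions in '1/nm' for diffraction data and spatial dimensions in 'nm' otherwise. A minimal usage sketch with a placeholder file name, assuming EMDReader is exposed at the package level like the other readers and that read() returns the usual dictionary of sidpy.Dataset objects:

    import SciFiReaders

    # 'stem_stack.emd' is a placeholder path; sum_frames and no_eds are the new
    # keyword arguments introduced in this commit.
    reader = SciFiReaders.EMDReader('stem_stack.emd', sum_frames=True, no_eds=False)
    datasets = reader.read()   # assumed: dict of sidpy.Dataset objects

    for key, dataset in datasets.items():
        # With sum_frames=True an image stack arrives as a single summed frame;
        # dimension units and offsets now come from the BinaryResult metadata.
        print(key, dataset.title, dataset.shape, dataset.units)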
7 changes: 4 additions & 3 deletions SciFiReaders/readers/microscopy/spm/afm/pifm.py
@@ -17,13 +17,14 @@
from pyNSID.io.hdf_io import write_nsid_dataset
from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs

+
class PiFMTranslator(Reader):
    """
    Class that writes images, spectrograms, point spectra and associated ancillary data sets to h5 file in pyUSID data
    structure.
    """

-    def read(self ):
+    def read(self):
        """
        Parameters
        ----------
@@ -101,13 +102,13 @@ def read_file_desc(self):
        spectrum_desc = {}
        pspectrum_desc = {}

-        with open(self.path,'r', encoding="ISO-8859-1") as f:
+        with open(self.path, 'r', encoding="ISO-8859-1") as f:

            lines = f.readlines()
            for index, line in enumerate(lines):

                sline = [val.strip() for val in line.split(':')]
-                #if true, then file describes image.
+                # if true, then file describes image.

                if sline[0].startswith('FileDescBegin'):
                    no_descriptors = 5
5 changes: 3 additions & 2 deletions setup.py
@@ -11,13 +11,14 @@

# TODO: Move requirements to requirements.txt
requirements = [ # basic
+    'setuptools==58.2.0',
    'numpy',
    'toolz', # dask installation failing without this
    'cytoolz', # dask installation failing without this
    'dask>=2.20.0',
    'sidpy>=0.11.2',
-    'numba==0.58; python_version < "3.11"',
-    'numba>=0.59.0rc1; python_version >= "3.11"',
+    'numba==0.58; python_version < "3.10"',
+    'numba>=0.59.0rc1; python_version >= "3.10"',
    'ipython>=7.1.0',
    'pyUSID',
    # generic:
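The two numba entries use PEP 508 environment markers, so only one pin is active for any given interpreter. A small sketch, not part of the repository, showing how such markers resolve with the packaging library that pip and setuptools build on:

    from packaging.requirements import Requirement

    reqs = [Requirement('numba==0.58; python_version < "3.10"'),
            Requirement('numba>=0.59.0rc1; python_version >= "3.10"')]

    for python_version in ('3.9', '3.11'):
        env = {'python_version': python_version}
        active = [f'numba{r.specifier}' for r in reqs if r.marker.evaluate(environment=env)]
        print(python_version, active)
    # 3.9  -> ['numba==0.58']
    # 3.11 -> ['numba>=0.59.0rc1']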
