#127 (Closed)
wants to merge 25 commits into from
Changes from all commits (25 commits)
4 changes: 2 additions & 2 deletions SciFiReaders/__version__.py
@@ -1,2 +1,2 @@
version = '0.11.0'
time = '2023-11-08 17:00:00'
version = '0.11.5'
time = '2024-02-26 17:00:00'
3 changes: 1 addition & 2 deletions SciFiReaders/readers/microscopy/em/tem/dm_reader.py
@@ -196,7 +196,6 @@ def read(self):
start=1
channel_number = 0
for image_number in self.__stored_tags['ImageList'].keys():
print(image_number, start)
if int(image_number) >= start:
dataset = self.get_dataset(self.__stored_tags['ImageList'][image_number])
if isinstance(dataset, sidpy.Dataset):
@@ -231,8 +230,8 @@ def read(self):

del self.__stored_tags['ImageList']
main_dataset_key = list(self.datasets.keys())[0]

for key, dataset in self.datasets.items():
print(key, dataset)
if 'urvey' in dataset.title:
main_dataset_key = key
if self.verbose:
1 change: 1 addition & 0 deletions SciFiReaders/readers/microscopy/em/tem/edax_reader.py
@@ -50,6 +50,7 @@ def reader(h5_file):
datasets = {}
# read spectrum images first
for dataset_item in all_datasets_list:
print(dataset_item)
if 'Live Map' in dataset_item:
if 'SPD' in dataset_item:
dataset = read_spectrum_image(base_group, dataset_item)
717 changes: 365 additions & 352 deletions SciFiReaders/readers/microscopy/em/tem/emd_reader.py

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions SciFiReaders/readers/microscopy/spm/afm/AR_hdf5.py
@@ -95,9 +95,9 @@ def read(self, verbose=False):
self.channels_name = [name for name in channels_name]

try:
self.points_per_sec = np.float(self.note_value('ARDoIVPointsPerSec'))
self.points_per_sec = float(self.note_value('ARDoIVPointsPerSec'))
except NameError:
self.points_per_sec = np.float(self.note_value('NumPtsPerSec'))
self.points_per_sec = float(self.note_value('NumPtsPerSec'))

if self.verbose:
print('Map size [X, Y]: ', self.map_size)
@@ -116,11 +116,11 @@ def read(self, verbose=False):
points_trimmed = np.array(self.segments[:, :, extension_idx]) - short_ext

# Open the output hdf5 file
x_dim = np.linspace(0, np.float(self.note_value('FastScanSize')),
x_dim = np.linspace(0, float(self.note_value('FastScanSize')),
self.map_size['X'])
y_dim = np.linspace(0, np.float(self.note_value('FastScanSize')),
y_dim = np.linspace(0, float(self.note_value('FastScanSize')),
self.map_size['Y'])
z_dim = np.arange(tot_length) / np.float(self.points_per_sec)
z_dim = np.arange(tot_length) / float(self.points_per_sec)

datasets = [] #list of sidpy datasets

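Context for the change above: NumPy deprecated the np.float alias in version 1.20 and removed it in 1.24, so the reader now calls the Python builtin float, which np.float had always aliased. A minimal sketch of the equivalent conversion; the note value below is hypothetical, not taken from a real AR file:

import numpy as np

note_value = '2.5e-06'                      # hypothetical string pulled from the instrument notes
fast_scan_size = float(note_value)          # builtin float; np.float(note_value) raises AttributeError on NumPy >= 1.24
x_dim = np.linspace(0, fast_scan_size, 64)  # same axis the old np.float-based call produced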
70 changes: 6 additions & 64 deletions SciFiReaders/readers/microscopy/spm/afm/pifm.py
@@ -14,16 +14,14 @@
import numpy as np
import h5py

from pyNSID.io.hdf_io import write_nsid_dataset
from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs

class PiFMTranslator(Reader):
class PiFMReader(Reader):
"""
Class that writes images, spectrograms, point spectra and associated ancillary data sets to h5 file in pyUSID data
structure.
Class that reads images, spectrograms, point spectra and associated
ancillary data sets from an Anfatec scan structure.
"""

def read(self ):
def read(self):
"""
Parameters
----------
@@ -101,13 +99,13 @@ def read_file_desc(self):
spectrum_desc = {}
pspectrum_desc = {}

with open(self.path,'r', encoding="ISO-8859-1") as f:
with open(self.path, 'r', encoding="ISO-8859-1") as f:

lines = f.readlines()
for index, line in enumerate(lines):

sline = [val.strip() for val in line.split(':')]
#if true, then file describes image.
# if true, then file describes image.

if sline[0].startswith('FileDescBegin'):
no_descriptors = 5
@@ -375,60 +373,4 @@ def make_dimensions(self):

# self.pos_ind, self.pos_val, self.pos_dims = pos_ind, pos_val, pos_dims

return

# HDF5 creation
def create_hdf5_file(self, append_path='', overwrite=False):
""" Sets up the HDF5 file for writing

append_path : string (Optional)
h5_file to add these data to, must be a path to the h5_file on disk
overwrite : bool (optional, default=False)
If True, will overwrite an existing .h5 file of the same name
"""

if not append_path:
h5_path = os.path.join(self.directory, self.basename.replace('.txt', '.h5'))
if os.path.exists(h5_path):
if not overwrite:
raise FileExistsError('This file already exists). Set attribute overwrite to True')
else:
print('Overwriting file', h5_path)
#os.remove(h5_path)

self.h5_f = h5py.File(h5_path, mode='w')

else:
if not os.path.exists(append_path):
raise Exception('File does not exist. Check pathname.')
self.h5_f = h5py.File(append_path, mode='r+')

self.h5_img_grp = create_indexed_group(self.h5_f, "Images")
self.h5_spectra_grp = create_indexed_group(self.h5_f, "Spectra")
self.h5_spectrogram_grp = create_indexed_group(self.h5_f, "Spectrogram")

write_simple_attrs(self.h5_img_grp, self.params_dictionary)
write_simple_attrs(self.h5_spectra_grp, self.params_dictionary)
write_simple_attrs(self.h5_spectrogram_grp, self.params_dictionary)

return

def write_datasets_hdf5(self):
""" Writes the datasets as pyNSID datasets to the HDF5 file"""
for dset in self.datasets:

if 'IMAGE' in dset.data_type.name:

write_nsid_dataset(dset, self.h5_img_grp)

elif 'SPECTRUM' in dset.data_type.name:

write_nsid_dataset(dset, self.h5_spectra_grp)

else:

write_nsid_dataset(dset, self.h5_spectrogram_grp)

self.h5_f.file.close()

return
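With the pyNSID write path removed, the renamed class acts as a plain reader like the other SciFiReaders readers. A minimal usage sketch, assuming the class is exported as SciFiReaders.PiFMReader, takes the path to the Anfatec parameter (.txt) file, and returns sidpy datasets from read(); the file name below is hypothetical:

import SciFiReaders as sr

reader = sr.PiFMReader('pifm_scan_0001.txt')   # hypothetical Anfatec parameter file
datasets = reader.read()                       # assumed to return sidpy.Dataset objects
for dset in datasets:
    print(dset.title, dset.data_type.name, dset.shape)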
10 changes: 6 additions & 4 deletions setup.py
@@ -11,15 +11,17 @@

# TODO: Move requirements to requirements.txt
requirements = [ # basic
'setuptools==58.2.0',
'numpy',
'toolz', # dask installation failing without this
'cytoolz', # dask installation failing without this
'dask>=2.20.0',
'sidpy>=0.11.2',
'numba==0.58; python_version < "3.11"',
'numba>=0.59.0rc1; python_version >= "3.11"',
'numba==0.58; python_version < "3.10"',
'numba>=0.59.0rc1; python_version >= "3.10"',
'ipython>=7.1.0',
'pyUSID',
'gdown',
# generic:
# Reader specific ones go to extras
]
@@ -53,10 +55,10 @@
author_email='[email protected]',
install_requires=requirements,
setup_requires=['pytest-runner'],
tests_require=['unitest', 'pytest', 'pywget', 'hyperspy', 'pyUSID', 'gwyfile'],
tests_require=['pytest', 'pywget', 'hyperspy', 'pyUSID', 'gwyfile', 'gdown'],
platforms=['Linux', 'Mac OSX', 'Windows 11/10/8.1/8/7'],
# package_data={'sample':['dataset_1.dat']}
test_suite='unittest',
test_suite='pytest',
# dependency='',
# dependency_links=[''],
include_package_data=True,
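The numba pins above rely on PEP 508 environment markers, so pip picks the build matching the running interpreter at install time. A small illustration of how such a marker evaluates, using the third-party packaging library (not a dependency added by this PR):

from packaging.markers import Marker

marker = Marker('python_version >= "3.10"')
print(marker.evaluate())   # True on Python 3.10+, so the numba>=0.59.0rc1 pin applies there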
34 changes: 34 additions & 0 deletions tests/readers/microscopy/spm/afm/test_arhdf5.py
@@ -0,0 +1,34 @@
import pytest
import sidpy
import SciFiReaders as sr
from pywget import wget
import os
try:
    import gdown
except ImportError:
    import pip
    pip.main(['install', 'gdown'])
    import gdown

@pytest.fixture
def arhdf5_file():
    file_path = 'PTO_SS_00.h5'
    gdown.download('https://drive.google.com/uc?id=10LpXdpm2tPiGEE_rqKlrIkZhaYkP_YBs', file_path, quiet=False)
    yield file_path
    os.remove(file_path)

def test_load_test_arhdf5_file(arhdf5_file):
    data_translator = sr.ARhdf5Reader(arhdf5_file)
    datasets = data_translator.read(verbose=False)
    test_data = datasets[1:6]
    assert len(test_data) == 5, f"Length of dataset should be 5 but is instead {len(test_data)}"
    channel_names = ['Defl', 'Amp', 'Phase', 'Phas2', 'Freq']
    channel_units = ['m', 'm', 'deg', 'deg', 'Hz']
    channel_labels = [['x (m)', 'y (m)', 'z (s)'], ['x (m)', 'y (m)', 'z (s)'], ['x (m)', 'y (m)', 'z (s)'], ['x (m)', 'y (m)', 'z (s)'], ['x (m)', 'y (m)', 'z (s)']]
    for ind, dataset in enumerate(test_data):
        assert isinstance(dataset, sidpy.sid.dataset.Dataset), f"Dataset No. {ind} not read in as sidpy dataset but was instead read in as {type(dataset)}"
        assert dataset.shape[0] == 64, f"Dataset[{ind}] should be of size 64 but was read in as {dataset.shape[0]}"
        assert isinstance(dataset._axes[0], sidpy.sid.dimension.Dimension), f"Dataset dimension should be a sidpy Dimension but is instead {type(dataset._axes[0])}"
        assert dataset.quantity == channel_names[ind], "Dataset has an inconsistent channel name"
        assert dataset.units == channel_units[ind], "Dataset has inconsistent units"
        assert dataset.labels == channel_labels[ind], "Dataset has inconsistent channel labels"
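The fixture above follows pytest's yield style: everything before the yield is setup (downloading the file with gdown), the yielded path is handed to the test, and the code after the yield runs as teardown and deletes the file. A minimal sketch of the same pattern with a hypothetical scratch file:

import os
import pytest

@pytest.fixture
def scratch_file():
    path = 'scratch.dat'            # hypothetical file name
    with open(path, 'wb') as f:
        f.write(b'\x00' * 16)       # setup: create the resource the test needs
    yield path                      # hand the path to the test
    os.remove(path)                 # teardown: runs after the test finishes

def test_scratch_file_size(scratch_file):
    assert os.path.getsize(scratch_file) == 16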
2 changes: 1 addition & 1 deletion tests/readers/microscopy/spm/afm/test_gwy.py
@@ -20,7 +20,7 @@ def gwy_file():
def test_load_test_gwy_file(gwy_file):
data_translator = sr.GwyddionReader(gwy_file)
datasets = data_translator.read(verbose=False)
assert len(datasets) == 4, f"Length of dataset should be 2 but is instead {len(datasets)}"
assert len(datasets) == 4, f"Length of dataset should be 4 but is instead {len(datasets)}"
channel_names = ['HeightRetrace', 'AmplitudeRetrace', 'DeflectionRetrace', 'PhaseRetrace']
channel_units = ['m', 'm', 'm', 'deg']
channel_labels = [['x (m)', 'y (m)'], ['x (m)', 'y (m)'], ['x (m)', 'y (m)'], ['x (m)', 'y (m)']]