From e35bc9c9903a0a7cb303ac0ce38962421185e457 Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Mon, 9 Dec 2024 15:21:47 +0100
Subject: [PATCH 01/12] Cache _have_daqmx_objects if ordered_objects did not
 change

---
 nptdms/tdms_segment.py | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index a48893b..576b23e 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -45,6 +45,7 @@ class TdmsSegment(object):
         'final_chunk_lengths_override',
         'object_index',
         'segment_incomplete',
+        'has_daqmx_objects_cached',
     ]
 
     def __init__(self, position, toc_mask, next_segment_pos, data_position, segment_incomplete):
@@ -57,6 +58,7 @@ def __init__(self, position, toc_mask, next_segment_pos, data_position, segment_
         self.ordered_objects = None
         self.object_index = None
         self.segment_incomplete = segment_incomplete
+        self.has_daqmx_objects_cached = None
 
     def __repr__(self):
         return "<TdmsSegment at position %d>" % self.position
@@ -135,6 +137,8 @@ def read_segment_objects(self, file, previous_segment_objects, index_cache, prev
 
         if index_cache is not None:
             self.object_index = index_cache.get_index(self.ordered_objects)
+
+        self._invalidate_cached_values()
         self._calculate_chunks()
         return properties
 
@@ -194,6 +198,7 @@ def _reuse_previous_object(
             segment_obj.has_data = True
             segment_obj.read_raw_data_index(file, raw_data_index_header, endianness)
         self.ordered_objects.append(segment_obj)
+        self._invalidate_cached_values()
 
     def _reuse_previous_segment_metadata(self, previous_segment):
         try:
@@ -383,6 +388,9 @@ def _get_data_reader(self):
             return ContiguousDataReader(self.num_chunks, self.final_chunk_lengths_override, endianness)
 
     def _have_daqmx_objects(self):
+        if self.has_daqmx_objects_cached is not None:
+            return self.has_daqmx_objects_cached
+
         data_obj_count = 0
         daqmx_count = 0
         for o in self.ordered_objects:
@@ -391,12 +399,12 @@ def _have_daqmx_objects(self):
             if isinstance(o, DaqmxSegmentObject):
                 daqmx_count += 1
         if daqmx_count == 0:
-            return False
-        if daqmx_count == data_obj_count:
-            return True
-        if daqmx_count > 0:
+            self.has_daqmx_objects_cached = False
+        elif daqmx_count == data_obj_count:
+            self.has_daqmx_objects_cached = True
+        elif daqmx_count > 0:
             raise Exception("Cannot read mixed DAQmx and non-DAQmx data")
-        return False
+        return self.has_daqmx_objects_cached
 
     def _have_interleaved_data(self):
         """ Whether data in this segment is interleaved. Assumes data is not DAQmx.
@@ -420,6 +428,8 @@ def _have_interleaved_data(self):
         else:
             raise ValueError("Cannot read interleaved segment containing channels with unsized types")
 
+    def _invalidate_cached_values(self):
+        self.has_daqmx_objects_cached = None
 
 class InterleavedDataReader(BaseDataReader):
     """ Reads data in a TDMS segment with interleaved data
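Note on patch 01: this is plain memoization with explicit invalidation. The scan over ordered_objects runs once, its result is stored, and every code path that rebuilds or appends to ordered_objects clears the stored value. A standalone sketch of the same pattern (simplified names, not npTDMS code):

    class Segment:
        """Minimal sketch of the cache-plus-invalidation pattern in patch 01."""

        def __init__(self):
            self.objects = []
            self.all_daqmx_cached = None  # None doubles as "not computed yet"

        def add_object(self, obj):
            self.objects.append(obj)
            self._invalidate_cached_values()  # any mutation must clear the cache

        def all_daqmx(self):
            if self.all_daqmx_cached is not None:
                return self.all_daqmx_cached
            # The expensive scan now runs at most once per mutation
            self.all_daqmx_cached = all(o.get("daqmx", False) for o in self.objects)
            return self.all_daqmx_cached

        def _invalidate_cached_values(self):
            self.all_daqmx_cached = None

Using None as the "not computed" sentinel only works because the cached value itself is never None; a bool cache like has_daqmx_objects_cached satisfies that.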
From 0aaaade9d31b74db433d42bb9034dba669155fdd Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Mon, 9 Dec 2024 16:31:01 +0100
Subject: [PATCH 02/12] Cache chunk_size

---
 nptdms/tdms_segment.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index 576b23e..70cf13b 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -46,6 +46,7 @@ class TdmsSegment(object):
         'object_index',
         'segment_incomplete',
         'has_daqmx_objects_cached',
+        'chunk_size_cached',
     ]
 
     def __init__(self, position, toc_mask, next_segment_pos, data_position, segment_incomplete):
@@ -59,6 +60,7 @@ def __init__(self, position, toc_mask, next_segment_pos, data_position, segment_
         self.object_index = None
         self.segment_incomplete = segment_incomplete
         self.has_daqmx_objects_cached = None
+        self.chunk_size_cached = None
 
     def __repr__(self):
         return "<TdmsSegment at position %d>" % self.position
@@ -271,7 +273,6 @@ def read_raw_data_for_channel(self, f, channel_path, chunk_offset=0, num_chunks=
 
         # Ensure we're working with Python ints as np.int32 values could overflow
         # (https://github.com/adamreeve/npTDMS/issues/338)
-        chunk_size = int(chunk_size)
         chunk_offset = int(chunk_offset)
 
         if chunk_offset > 0:
@@ -356,11 +357,15 @@ def _new_segment_object(self, object_path, raw_data_index_header):
         return TdmsSegmentObject(object_path)
 
     def _get_chunk_size(self):
+        if self.chunk_size_cached is not None:
+            return self.chunk_size_cached
+
         if self._have_daqmx_objects():
-            return get_daqmx_chunk_size(self.ordered_objects)
-        return sum(
-            o.data_size
-            for o in self.ordered_objects if o.has_data)
+            self.chunk_size_cached = int(get_daqmx_chunk_size(self.ordered_objects))
+            return self.chunk_size_cached
+
+        self.chunk_size_cached = int(sum(o.data_size for o in self.ordered_objects if o.has_data))
+        return self.chunk_size_cached
 
     def _read_data_chunks(self, file, data_objects, num_chunks):
         """ Read multiple data chunks at once
@@ -428,8 +433,10 @@ def _have_interleaved_data(self):
         else:
             raise ValueError("Cannot read interleaved segment containing channels with unsized types")
 
+
     def _invalidate_cached_values(self):
         self.has_daqmx_objects_cached = None
+        self.chunk_size_cached = None
 
 class InterleavedDataReader(BaseDataReader):
     """ Reads data in a TDMS segment with interleaved data
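Note on patch 02: the int() coercions moved into _get_chunk_size matter because per-object data sizes can be NumPy fixed-width scalars, and the cached chunk size is later multiplied by a chunk count (see the issue 338 link kept in the context lines above). A small illustration of the failure mode, with a made-up size value:

    import numpy as np

    data_size = np.int32(1_200_000_000)  # hypothetical per-chunk byte count
    wrapped = data_size * 2              # stays int32 and can wrap around
    exact = int(data_size) * 2           # Python ints are arbitrary precision

    print(wrapped == exact)              # False: the int32 result overflowed

Coercing once, at the point where the value is cached, means every later consumer of chunk_size_cached gets a safe Python int.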
From 8572b8875af130fc73c0999ec98a9b4618d4022a Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Mon, 9 Dec 2024 16:38:56 +0100
Subject: [PATCH 03/12] Cache data_objects

---
 nptdms/tdms_segment.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index 70cf13b..1fc10a1 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -47,6 +47,7 @@ class TdmsSegment(object):
         'segment_incomplete',
         'has_daqmx_objects_cached',
         'chunk_size_cached',
+        'data_objects_cached',
     ]
 
     def __init__(self, position, toc_mask, next_segment_pos, data_position, segment_incomplete):
@@ -61,6 +62,7 @@ def __init__(self, position, toc_mask, next_segment_pos, data_position, segment_
         self.segment_incomplete = segment_incomplete
         self.has_daqmx_objects_cached = None
         self.chunk_size_cached = None
+        self.data_objects_cached = None
 
     def __repr__(self):
         return "<TdmsSegment at position %d>" % self.position
@@ -268,7 +270,6 @@ def read_raw_data_for_channel(self, f, channel_path, chunk_offset=0, num_chunks=
 
         f.seek(self.data_position)
 
-        data_objects = [o for o in self.ordered_objects if o.has_data]
         chunk_size = self._get_chunk_size()
 
         # Ensure we're working with Python ints as np.int32 values could overflow
@@ -278,7 +279,7 @@ def read_raw_data_for_channel(self, f, channel_path, chunk_offset=0, num_chunks=
         if chunk_offset > 0:
             f.seek(chunk_size * chunk_offset, os.SEEK_CUR)
         stop_chunk = self.num_chunks if num_chunks is None else num_chunks + chunk_offset
-        for chunk in self._read_channel_data_chunks(f, data_objects, channel_path, chunk_offset, stop_chunk):
+        for chunk in self._read_channel_data_chunks(f, self._get_data_objects(), channel_path, chunk_offset, stop_chunk):
             yield chunk
 
     def _calculate_chunks(self):
@@ -433,10 +434,18 @@ def _have_interleaved_data(self):
         else:
             raise ValueError("Cannot read interleaved segment containing channels with unsized types")
 
+    def _get_data_objects(self):
+        if self.data_objects_cached is not None:
+            return self.data_objects_cached
+
+        self.data_objects_cached = [o for o in self.ordered_objects if o.has_data]
+        return self.data_objects_cached
 
     def _invalidate_cached_values(self):
         self.has_daqmx_objects_cached = None
         self.chunk_size_cached = None
+        self.data_objects_cached = None
+
 
 class InterleavedDataReader(BaseDataReader):
     """ Reads data in a TDMS segment with interleaved data
From 5d2903d643ecfc6ecf7d8c12175d4036d9875f64 Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Mon, 9 Dec 2024 16:59:51 +0100
Subject: [PATCH 04/12] Reduce the number of seek calls

---
 nptdms/tdms_segment.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index 1fc10a1..1f353d0 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -518,20 +518,22 @@ def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path
         """ Read data from a chunk for a single channel
         """
         channel_data = RawChannelDataChunk.empty()
+        current_position = file.tell()
         for obj in data_objects:
             number_values = self._get_channel_number_values(obj, chunk_index)
             if obj.path == channel_path:
+                file.seek(current_position)
                 channel_data = RawChannelDataChunk.channel_data(obj.read_values(file, number_values, self.endianness))
+                current_position = file.tell()
             elif number_values == obj.number_values:
                 # Seek over data for other channel data
-                file.seek(obj.data_size, os.SEEK_CUR)
+                current_position += obj.data_size
             else:
                 # In last chunk with reduced chunk size
-                if obj.data_type.size is None:
-                    # Type is unsized (eg. string), try reading number of values
-                    obj.read_values(file, number_values, self.endianness)
-                else:
-                    file.seek(obj.data_type.size * number_values, os.SEEK_CUR)
+                if obj.data_type.size is not None:
+                    current_position += obj.data_type.size * number_values
+
+        file.seek(current_position)
         return channel_data
 
     def _get_channel_number_values(self, obj, chunk_index):

From ee3d836256f4db158883fea75231c3f427f4915e Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Mon, 9 Dec 2024 17:09:40 +0100
Subject: [PATCH 05/12] Evaluate the cheaper condition first

---
 nptdms/tdms_segment.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index 1f353d0..ae24a37 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -537,7 +537,7 @@ def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path
         return channel_data
 
     def _get_channel_number_values(self, obj, chunk_index):
-        if chunk_index == (self.num_chunks - 1) and self.final_chunk_lengths_override is not None:
+        if self.final_chunk_lengths_override is not None and chunk_index == (self.num_chunks - 1):
            return self.final_chunk_lengths_override.get(obj.path, 0)
         else:
             return obj.number_values
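Note on patch 05: the rewrite relies on "and" short-circuiting. final_chunk_lengths_override is None for every segment that is not truncated, so with the None test first the common case is decided by a single cheap identity check and the subtraction and comparison are skipped; since _get_channel_number_values runs once per object per chunk, that adds up. A toy demonstration of the evaluation order (standalone sketch, not npTDMS code):

    def expensive_check():
        print("expensive check evaluated")
        return True

    override = None

    # With the cheap test first, expensive_check() is never called:
    if override is not None and expensive_check():
        print("use override")
    else:
        print("use default")  # printed without evaluating expensive_check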
From 57c2e7d51e3dd9501f7325d4eb5d1cf828f7d893 Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Mon, 9 Dec 2024 20:53:18 +0100
Subject: [PATCH 06/12] Don't iterate over all objects

Use chunk size and index to skip to the expected chunk.

---
 nptdms/base_segment.py |  6 +++---
 nptdms/tdms_segment.py | 18 ++++++++++--------
 2 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/nptdms/base_segment.py b/nptdms/base_segment.py
index 3f40642..f3b5738 100644
--- a/nptdms/base_segment.py
+++ b/nptdms/base_segment.py
@@ -56,14 +56,14 @@ def _read_data_chunk(self, file, data_objects, chunk_index):
         """
         raise NotImplementedError("Data chunk reading must be implemented in base classes")
 
-    def read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk):
+    def read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk, chunk_size):
         """ Read multiple data chunks for a single channel at once
             In the base case we read each chunk individually but subclasses can override this
         """
         for chunk_index in range(chunk_offset, stop_chunk):
-            yield self._read_channel_data_chunk(file, data_objects, chunk_index, channel_path)
+            yield self._read_channel_data_chunk(file, data_objects, chunk_index, channel_path, chunk_size)
 
-    def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path):
+    def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path, chunk_size):
         """ Read data from a chunk for a single channel
         """
         # In the base case we can read data for all channels
diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index ae24a37..5776c84 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -279,7 +279,7 @@ def read_raw_data_for_channel(self, f, channel_path, chunk_offset=0, num_chunks=
         if chunk_offset > 0:
             f.seek(chunk_size * chunk_offset, os.SEEK_CUR)
         stop_chunk = self.num_chunks if num_chunks is None else num_chunks + chunk_offset
-        for chunk in self._read_channel_data_chunks(f, self._get_data_objects(), channel_path, chunk_offset, stop_chunk):
+        for chunk in self._read_channel_data_chunks(f, self._get_data_objects(), channel_path, chunk_offset, stop_chunk, chunk_size):
             yield chunk
 
     def _calculate_chunks(self):
@@ -376,13 +376,15 @@ def _read_data_chunks(self, file, data_objects, num_chunks):
         for chunk in reader.read_data_chunks(file, data_objects, num_chunks):
             yield chunk
 
-    def _read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk):
+    def _read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk, chunk_size):
         """ Read multiple data chunks for a single channel at once
             In the base case we read each chunk individually but subclasses can override this
         """
         reader = self._get_data_reader()
-        for chunk in reader.read_channel_data_chunks(file, data_objects, channel_path, chunk_offset, stop_chunk):
+        initial_position = file.tell()
+        for i, chunk in enumerate(reader.read_channel_data_chunks(file, data_objects, channel_path, chunk_offset, stop_chunk, chunk_size)):
             yield chunk
+            file.seek(initial_position + (i + 1) * chunk_size)
 
     def _get_data_reader(self):
@@ -462,7 +464,7 @@ def read_data_chunks(self, file, data_objects, num_chunks):
             raise ValueError("Cannot read interleaved data with different chunk sizes")
         return [self._read_interleaved_chunks(file, data_objects, num_chunks)]
 
-    def read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk):
+    def read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk, chunk_size):
         """ Read multiple data chunks for a single channel at once
         """
         num_chunks = stop_chunk - chunk_offset
@@ -514,7 +516,7 @@ def _read_data_chunk(self, file, data_objects, chunk_index):
             object_data[obj.path] = obj.read_values(file, number_values, self.endianness)
         return RawDataChunk.channel_data(object_data)
 
-    def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path):
+    def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path, chunk_size):
         """ Read data from a chunk for a single channel
         """
         channel_data = RawChannelDataChunk.empty()
@@ -525,13 +527,13 @@ def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path
                 file.seek(current_position)
                 channel_data = RawChannelDataChunk.channel_data(obj.read_values(file, number_values, self.endianness))
                 current_position = file.tell()
+                break
             elif number_values == obj.number_values:
                 # Seek over data for other channel data
                 current_position += obj.data_size
-            else:
+            elif obj.data_type.size is not None:
                 # In last chunk with reduced chunk size
-                if obj.data_type.size is not None:
-                    current_position += obj.data_type.size * number_values
+                current_position += obj.data_type.size * number_values
 
         file.seek(current_position)
         return channel_data
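Note on patches 04 and 06: both replace many small relative seeks with arithmetic on a tracked offset. Because every full chunk has the same byte size, the position of chunk i is simply initial_position + i * chunk_size, so the reader can jump straight to the chunk it wants and re-synchronise once per chunk instead of stepping over every object's data. A self-contained sketch of that offset arithmetic (hypothetical fixed-size records, not the real TDMS layout):

    import io
    import struct

    # Three "chunks", each holding one little-endian int32 per channel a, b, c.
    records = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
    buffer = io.BytesIO(b"".join(struct.pack("<3i", *r) for r in records))

    channel_offsets = {"a": 0, "b": 4, "c": 8}  # byte offset inside one chunk
    chunk_size = 12                             # total bytes per chunk

    def read_channel(f, channel, chunk_index, initial_position=0):
        # One absolute seek computed from the chunk index -- no need to
        # step over the other channels' data to get there.
        f.seek(initial_position + chunk_index * chunk_size + channel_offsets[channel])
        return struct.unpack("<i", f.read(4))[0]

    print(read_channel(buffer, "b", 2))  # -> 8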
From f05f2304266b21fa10041d4225ac2261ccb95be5 Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Mon, 9 Dec 2024 22:33:30 +0100
Subject: [PATCH 07/12] Fix code style

---
 nptdms/tdms_segment.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index 5776c84..4f539bb 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -279,7 +279,9 @@ def read_raw_data_for_channel(self, f, channel_path, chunk_offset=0, num_chunks=
         if chunk_offset > 0:
             f.seek(chunk_size * chunk_offset, os.SEEK_CUR)
         stop_chunk = self.num_chunks if num_chunks is None else num_chunks + chunk_offset
-        for chunk in self._read_channel_data_chunks(f, self._get_data_objects(), channel_path, chunk_offset, stop_chunk, chunk_size):
+        for chunk in self._read_channel_data_chunks(
+                f, self._get_data_objects(), channel_path, chunk_offset, stop_chunk, chunk_size
+        ):
             yield chunk
 
     def _calculate_chunks(self):
@@ -382,7 +384,9 @@ def _read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offs
         """
         reader = self._get_data_reader()
         initial_position = file.tell()
-        for i, chunk in enumerate(reader.read_channel_data_chunks(file, data_objects, channel_path, chunk_offset, stop_chunk, chunk_size)):
+        for i, chunk in enumerate(reader.read_channel_data_chunks(
+                file, data_objects, channel_path, chunk_offset, stop_chunk, chunk_size
+        )):
             yield chunk
             file.seek(initial_position + (i + 1) * chunk_size)
 

From 9e34b0f172b3ceb4a343961a3b4d211058423e55 Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Tue, 10 Dec 2024 10:54:11 +0100
Subject: [PATCH 08/12] Remove redundant seek

---
 nptdms/tdms_segment.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index 4f539bb..0a83634 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -539,7 +539,6 @@ def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path
                 # In last chunk with reduced chunk size
                 current_position += obj.data_type.size * number_values
 
-        file.seek(current_position)
         return channel_data
 
     def _get_channel_number_values(self, obj, chunk_index):

From 9e9976758a33c288edf3256d9cf63d771cb54407 Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Tue, 10 Dec 2024 10:54:27 +0100
Subject: [PATCH 09/12] Remove useless cache invalidation

---
 nptdms/tdms_segment.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index 0a83634..d7910b5 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -142,7 +142,6 @@ def read_segment_objects(self, file, previous_segment_objects, index_cache, prev
         if index_cache is not None:
             self.object_index = index_cache.get_index(self.ordered_objects)
 
-        self._invalidate_cached_values()
         self._calculate_chunks()
         return properties
 
@@ -202,7 +201,6 @@ def _reuse_previous_object(
             segment_obj.has_data = True
             segment_obj.read_raw_data_index(file, raw_data_index_header, endianness)
         self.ordered_objects.append(segment_obj)
-        self._invalidate_cached_values()
 
     def _reuse_previous_segment_metadata(self, previous_segment):
         try:
@@ -447,11 +445,6 @@ def _get_data_objects(self):
         self.data_objects_cached = [o for o in self.ordered_objects if o.has_data]
         return self.data_objects_cached
 
-    def _invalidate_cached_values(self):
-        self.has_daqmx_objects_cached = None
-        self.chunk_size_cached = None
-        self.data_objects_cached = None
-
 
 class InterleavedDataReader(BaseDataReader):
     """ Reads data in a TDMS segment with interleaved data
From c29dd26b3dcca6fcae288540a146c6153669b2ce Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Wed, 11 Dec 2024 08:36:19 +0100
Subject: [PATCH 10/12] Add exception for truncated segments

---
 nptdms/tdms_segment.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index d7910b5..61dba55 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -531,6 +531,8 @@ def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path
             elif obj.data_type.size is not None:
                 # In last chunk with reduced chunk size
                 current_position += obj.data_type.size * number_values
+            else:
+                raise Exception("Cannot skip over channel with unsized type in a truncated segment")
 
         return channel_data
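Note on patch 10: the exception covers the case patch 04 left silent. For a type with no fixed size (TDMS strings), the byte length of number_values values cannot be computed as size * count, so there is no way to advance current_position without actually parsing the data; raising is the only safe option once the fast skip path no longer reads values. A generic illustration with length-prefixed values (not the exact TDMS string encoding):

    import io
    import struct

    # Variable-length values: a 4-byte length prefix followed by the payload.
    values = [b"short", b"a much longer value", b""]
    raw = b"".join(struct.pack("<I", len(v)) + v for v in values)
    f = io.BytesIO(raw)

    def skip_values(f, n):
        # Skipping n fixed-size values would be pure arithmetic (n * size);
        # here every length prefix has to be read before moving on.
        for _ in range(n):
            (length,) = struct.unpack("<I", f.read(4))
            f.seek(length, io.SEEK_CUR)

    skip_values(f, 2)
    print(f.tell())  # 32, known only after reading both length prefixes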
From 33056c596b43ee9a763ede2d16cb30a48520dfd9 Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Wed, 11 Dec 2024 08:36:42 +0100
Subject: [PATCH 11/12] Update gitignore to ignore PyCharm projects

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 384e350..8f159db 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,7 @@ coverage.xml
 .ipynb_checkpoints
 .vscode
 *.ipynb
+.idea
 
 # Wercker directories
 _builds

From 0c217306fbdf28a6aaa3032f055ef62227c2689e Mon Sep 17 00:00:00 2001
From: "Loibl Johannes (IFAG DES PTS TI EA DE)" <Johannes.Loibl@infineon.com>
Date: Wed, 11 Dec 2024 10:08:03 +0100
Subject: [PATCH 12/12] Remove unneeded chunk_size args

---
 nptdms/base_segment.py | 6 +++---
 nptdms/tdms_segment.py | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/nptdms/base_segment.py b/nptdms/base_segment.py
index f3b5738..3f40642 100644
--- a/nptdms/base_segment.py
+++ b/nptdms/base_segment.py
@@ -56,14 +56,14 @@ def _read_data_chunk(self, file, data_objects, chunk_index):
         """
         raise NotImplementedError("Data chunk reading must be implemented in base classes")
 
-    def read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk, chunk_size):
+    def read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk):
         """ Read multiple data chunks for a single channel at once
             In the base case we read each chunk individually but subclasses can override this
         """
         for chunk_index in range(chunk_offset, stop_chunk):
-            yield self._read_channel_data_chunk(file, data_objects, chunk_index, channel_path, chunk_size)
+            yield self._read_channel_data_chunk(file, data_objects, chunk_index, channel_path)
 
-    def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path, chunk_size):
+    def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path):
         """ Read data from a chunk for a single channel
         """
         # In the base case we can read data for all channels
diff --git a/nptdms/tdms_segment.py b/nptdms/tdms_segment.py
index 61dba55..cacde39 100644
--- a/nptdms/tdms_segment.py
+++ b/nptdms/tdms_segment.py
@@ -383,7 +383,7 @@ def _read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offs
         reader = self._get_data_reader()
         initial_position = file.tell()
         for i, chunk in enumerate(reader.read_channel_data_chunks(
-                file, data_objects, channel_path, chunk_offset, stop_chunk, chunk_size
+                file, data_objects, channel_path, chunk_offset, stop_chunk
         )):
             yield chunk
             file.seek(initial_position + (i + 1) * chunk_size)
@@ -461,7 +461,7 @@ def read_data_chunks(self, file, data_objects, num_chunks):
             raise ValueError("Cannot read interleaved data with different chunk sizes")
         return [self._read_interleaved_chunks(file, data_objects, num_chunks)]
 
-    def read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk, chunk_size):
+    def read_channel_data_chunks(self, file, data_objects, channel_path, chunk_offset, stop_chunk):
         """ Read multiple data chunks for a single channel at once
         """
         num_chunks = stop_chunk - chunk_offset
@@ -513,7 +513,7 @@ def _read_data_chunk(self, file, data_objects, chunk_index):
             object_data[obj.path] = obj.read_values(file, number_values, self.endianness)
         return RawDataChunk.channel_data(object_data)
 
-    def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path, chunk_size):
+    def _read_channel_data_chunk(self, file, data_objects, chunk_index, channel_path):
         """ Read data from a chunk for a single channel
         """
         channel_data = RawChannelDataChunk.empty()
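Note: taken together, the series mostly speeds up single-channel reads that go through read_raw_data_for_channel. A quick way to check the effect on a real file (hypothetical file, group and channel names; example.tdms is not part of the repository):

    import time

    from nptdms import TdmsFile

    start = time.perf_counter()
    # Streaming mode, so channel[:] reads just this channel's data chunk by
    # chunk via read_raw_data_for_channel instead of loading the whole file.
    with TdmsFile.open("example.tdms") as tdms_file:
        channel = tdms_file["group"]["channel"]
        data = channel[:]
    elapsed = time.perf_counter() - start

    print("read %d values in %.3fs" % (len(data), elapsed))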