Remove Python 2.7 leftovers
Functions `u()` and `du()` are no longer required in Python 3: all variables are already `str`.
DimitriPapadopoulos committed Jan 26, 2025
1 parent 0c5037d commit 7eaa773
Showing 13 changed files with 79 additions and 96 deletions.
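
For context, the two helpers removed below were thin wrappers around the built-in `str`/`bytes` methods, so call sites can use those methods directly. A minimal sketch of the equivalence (plain Python 3, not part of the diff):

```python
# The removed helpers (copied from edfwriter.py before this commit):
def u(x: bytes) -> str:
    return x.decode("utf_8", "strict")

def du(x):
    return x if isinstance(x, bytes) else x.encode("utf_8")

# In Python 3, bytes.decode() and str.encode() default to UTF-8 with
# errors='strict', so the helpers add nothing:
assert u(b"abc") == b"abc".decode()   # plain decode() is enough
assert du("abc") == "abc".encode()    # plain encode() is enough
assert du(b"abc") == b"abc"           # bytes passed through unchanged
```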
2 changes: 1 addition & 1 deletion doc/source/dev/building_extension.rst
@@ -34,7 +34,7 @@ repository or use the upstream repository to get the source code::

git clone https://github.com/holgern/pyedflib.git pyedflib

Install Microsoft Visual C++ Compiler for Python 2.7 from https://www.microsoft.com/en-us/download/details.aspx?id=44266
Install Microsoft Visual C++ Compiler from https://visualstudio.microsoft.com/fr/downloads/

Activate your Python virtual environment, go to the cloned source directory
and type the following commands to build and install the package::
2 changes: 1 addition & 1 deletion doc/source/dev/how_to_release.rst
@@ -41,7 +41,7 @@ Register all files with

and upload with

```twine upload dist\filename_which_should_uploaded.whl```
```twine upload dist\filename_which_should_uploaded.whl```

Prepare for continued development
---------------------------------
4 changes: 2 additions & 2 deletions doc/source/dev/index.rst
@@ -22,6 +22,6 @@ Something not working?
----------------------

If these instructions are not clear or you need help setting up your
development environment, go ahead and open a ticket on GitHub_.
development environment, go ahead and open a ticket on GitHub_.

.. _GitHub: https://github.com/holgern/pyedflib
.. _GitHub: https://github.com/holgern/pyedflib
10 changes: 5 additions & 5 deletions doc/source/dev/preparing_windows_build_environment.rst
@@ -7,16 +7,16 @@ Preparing Windows build environment
To start developing pyedflib code on Windows you will have to install
a C compiler and prepare the build environment.

Installing Microsoft Visual C++ Compiler for Python 2.7
-------------------------------------------------------
Installing Microsoft Visual C++ Compiler
----------------------------------------

Downloading Microsoft Visual C++ Compiler for Python 2.7 from https://www.microsoft.com/en-us/download/details.aspx?id=44266.
Downloading Microsoft Visual C++ Compiler from https://visualstudio.microsoft.com/fr/downloads/.


After installing the Compiler and before compiling the extension you have
to configure some environment variables.

For build execute the ``util/setenv_win.bat`` script in the cmd
For build execute the ``util/setenv_win.bat`` script in the cmd
window:

.. sourcecode:: bat
@@ -39,4 +39,4 @@ After completing these steps continue with
.. _numpy: https://numpy.org/
.. _Cython: https://cython.org/
.. _Sphinx: https://www.sphinx-doc.org/
.. _Microsoft Visual C++ Compiler for Python 2.7: https://www.microsoft.com/en-us/download/details.aspx?id=44266
.. _Microsoft Visual C++ Compiler: https://visualstudio.microsoft.com/fr/downloads/
2 changes: 1 addition & 1 deletion doc/source/resources.rst
@@ -11,7 +11,7 @@ The `GitHub repository`_ is now the main
code repository.

If you are using the Mercurial repository at Bitbucket, please switch
to Git/GitHub and follow for development updates.
to Git/GitHub and follow for development updates.


Questions and bug reports
6 changes: 3 additions & 3 deletions pyedflib/_extensions/_pyedflib.pyx
@@ -202,7 +202,7 @@ cdef class CyEdfReader:
"""
open(file_name, annotations_mode, check_file_size)
"""
file_name_str = file_name.encode('utf_8','strict')
file_name_str = file_name.encode(errors='strict')
result = c_edf.edfopen_file_readonly(file_name_str, &self.hdr, annotations_mode, check_file_size)

self.file_name = file_name
@@ -511,7 +511,7 @@ def get_handle(file_number):
return c_edf.edflib_get_handle(file_number)

def is_file_used(path):
path_byte = path.encode('utf_8','strict')
path_byte = path.encode(errors='strict')
return c_edf.edflib_is_file_used(path_byte)

# so you can use the same name if defining a python only function
@@ -521,7 +521,7 @@ def set_physical_maximum(handle, edfsignal, phys_max):
def open_file_writeonly(path, filetype, number_of_signals):
"""int edfopen_file_writeonly(char *path, int filetype, int number_of_signals)"""

py_byte_string = path.encode('utf_8','strict')
py_byte_string = path.encode(errors='strict')
cdef char* path_str = py_byte_string
return c_edf.edfopen_file_writeonly(path_str, filetype, number_of_signals)

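
The `.pyx` changes above rely on `str.encode()` already defaulting to UTF-8 with strict error handling in Python 3, so dropping the explicit `'utf_8'` argument leaves the produced bytes unchanged. A short illustrative sketch (example path only, not from the repository):

```python
path = "recording_ä.edf"   # hypothetical file name with a non-ASCII character

# All three spellings yield identical bytes; 'utf-8' and 'strict' are the defaults.
assert path.encode() == path.encode("utf_8", "strict") == path.encode(errors="strict")

# Unencodable input still fails loudly under the default 'strict' handler.
try:
    "\udc80".encode()      # a lone surrogate cannot be encoded as UTF-8
except UnicodeEncodeError:
    pass
```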
42 changes: 21 additions & 21 deletions pyedflib/edfreader.py
@@ -44,32 +44,32 @@ def _debug_parse_header(filename: str, printout=True) -> None: # pragma: no cov
header = OrderedDict()
with open(filename, "rb") as f:
f.seek(0)
header["version"] = f.read(8).decode()
header["patient_id"] = f.read(80).decode().strip()
header["recording_id"] = f.read(80).decode().strip()
header["startdate"] = f.read(8).decode()
header["starttime"] = f.read(8).decode()
header["header_n_bytes"] = f.read(8).decode()
header["reserved"] = f.read(44).decode().strip()
header["n_records"] = f.read(8).decode()
header["record_duration"] = f.read(8).decode()
header["n_signals"] = f.read(4).decode()
header["version"] = f.read(8).decode("ascii")
header["patient_id"] = f.read(80).decode("ascii").strip()
header["recording_id"] = f.read(80).decode("ascii").strip()
header["startdate"] = f.read(8).decode("ascii")
header["starttime"] = f.read(8).decode("ascii")
header["header_n_bytes"] = f.read(8).decode("ascii")
header["reserved"] = f.read(44).decode("ascii").strip()
header["n_records"] = f.read(8).decode("ascii")
header["record_duration"] = f.read(8).decode("ascii")
header["n_signals"] = f.read(4).decode("ascii")

if printout:
print("\n##### Header")
print(json.dumps(header, indent=2))

nsigs = int(header["n_signals"])
label = [f.read(16).decode() for i in range(nsigs)]
transducer = [f.read(80).decode().strip() for i in range(nsigs)]
dimension = [f.read(8).decode().strip() for i in range(nsigs)]
pmin = [f.read(8).decode() for i in range(nsigs)]
pmax = [f.read(8).decode() for i in range(nsigs)]
dmin = [f.read(8).decode() for i in range(nsigs)]
dmax = [f.read(8).decode() for i in range(nsigs)]
prefilter = [f.read(80).decode().strip() for i in range(nsigs)]
n_samples = [f.read(8).decode() for i in range(nsigs)]
reserved = [f.read(32).decode() for i in range(nsigs)]
label = [f.read(16).decode("ascii") for i in range(nsigs)]
transducer = [f.read(80).decode("ascii").strip() for i in range(nsigs)]
dimension = [f.read(8).decode("ascii").strip() for i in range(nsigs)]
pmin = [f.read(8).decode("ascii") for i in range(nsigs)]
pmax = [f.read(8).decode("ascii") for i in range(nsigs)]
dmin = [f.read(8).decode("ascii") for i in range(nsigs)]
dmax = [f.read(8).decode("ascii") for i in range(nsigs)]
prefilter = [f.read(80).decode("ascii").strip() for i in range(nsigs)]
n_samples = [f.read(8).decode("ascii") for i in range(nsigs)]
reserved = [f.read(32).decode("ascii") for i in range(nsigs)]
_ = zip(
label,
transducer,
@@ -168,7 +168,7 @@ def _get_float(self, v: np.ndarray) -> np.ndarray:

def _convert_string(self, s: Union[bytes, str]) -> str:
if isinstance(s, bytes):
return s.decode("latin")
return s.decode("latin_1")
elif isinstance(s, str):
return s
else:
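
Two reader details worth spelling out: the EDF(+) header is defined as plain ASCII, so naming the codec in `_debug_parse_header` documents that assumption without changing the result for valid files, and `"latin"` and `"latin_1"` are aliases of the same codec, so `_convert_string` keeps its behaviour. A small sketch under those assumptions (hypothetical header bytes, not taken from the test data):

```python
import codecs

# "latin" and "latin_1" resolve to the same codec, so the rename is cosmetic.
assert codecs.lookup("latin").name == codecs.lookup("latin_1").name

# EDF header fields are fixed-width ASCII; decoding them explicitly as "ascii"
# fails fast on a corrupted header instead of silently mis-decoding it.
raw_version = b"0       "           # hypothetical 8-byte version field
assert raw_version.decode("ascii").strip() == "0"
```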
43 changes: 15 additions & 28 deletions pyedflib/edfwriter.py
@@ -146,17 +146,6 @@ def check_signal_header_correct(channels: List[Dict[str, Union[str, None, float]
str(ch['physical_max'])[:8]))


def u(x: bytes) -> str:
return x.decode("utf_8", "strict")


def du(x: Union[str, bytes]) -> bytes:
if isinstance(x, bytes):
return x
else:
return x.encode("utf_8")


def isstr(s: Any) -> bool:
warnings.warn("Function 'isstr' is deprecated.", DeprecationWarning, stacklevel=2)
return isinstance(s, str)
@@ -389,13 +378,13 @@ def update_header(self) -> None:
f'{sample_freqs=} contains non int/float'
self.record_duration = _calculate_record_duration(sample_freqs)

set_technician(self.handle, du(self.technician))
set_recording_additional(self.handle, du(self.recording_additional))
set_patientname(self.handle, du(self.patient_name))
set_patientcode(self.handle, du(self.patient_code))
set_patient_additional(self.handle, du(self.patient_additional))
set_equipment(self.handle, du(self.equipment))
set_admincode(self.handle, du(self.admincode))
set_technician(self.handle, self.technician.encode('ascii'))
set_recording_additional(self.handle, self.recording_additional.encode('ascii'))
set_patientname(self.handle, self.patient_name.encode('ascii'))
set_patientcode(self.handle, self.patient_code.encode('ascii'))
set_patient_additional(self.handle, self.patient_additional.encode('ascii'))
set_equipment(self.handle, self.equipment.encode('ascii'))
set_admincode(self.handle, self.admincode.encode('ascii'))
set_sex(self.handle, sex2int(self.sex))

set_datarecord_duration(self.handle, self.record_duration)
Expand All @@ -420,10 +409,10 @@ def update_header(self) -> None:
set_physical_minimum(self.handle, i, self.channels[i]['physical_min'])
set_digital_maximum(self.handle, i, self.channels[i]['digital_max'])
set_digital_minimum(self.handle, i, self.channels[i]['digital_min'])
set_label(self.handle, i, du(self.channels[i]['label']))
set_physical_dimension(self.handle, i, du(self.channels[i]['dimension']))
set_transducer(self.handle, i, du(self.channels[i]['transducer']))
set_prefilter(self.handle, i, du(self.channels[i]['prefilter']))
set_label(self.handle, i, self.channels[i]['label'].encode('ascii'))
set_physical_dimension(self.handle, i, self.channels[i]['dimension'].encode('ascii'))
set_transducer(self.handle, i, self.channels[i]['transducer'].encode('ascii'))
set_prefilter(self.handle, i, self.channels[i]['prefilter'].encode('ascii'))



@@ -1017,16 +1006,14 @@ def writeAnnotation(self, onset_in_seconds: Union[int, float], duration_in_secon

if str_format == 'utf_8':
if duration_in_seconds >= 0:
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(np.int64), np.round(duration_in_seconds*10000).astype(int), du(description))
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(np.int64), np.round(duration_in_seconds*10000).astype(int), description.encode('utf_8'))
else:
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(np.int64), -1, du(description))
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(np.int64), -1, description.encode('utf_8'))
else:
if duration_in_seconds >= 0:
# FIX: description must be bytes. string will fail in u function
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(np.int64), np.round(duration_in_seconds*10000).astype(int), u(description).encode('latin1')) # type: ignore
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(np.int64), np.round(duration_in_seconds*10000).astype(int), description.encode('latin1')) # type: ignore
else:
# FIX: description must be bytes. string will fail in u function
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(np.int64), -1, u(description).encode('latin1')) # type: ignore
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(np.int64), -1, description.encode('latin1')) # type: ignore

def close(self) -> None:
"""
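
With `du()` gone, `update_header` and `writeAnnotation` encode their `str` arguments directly: header fields are encoded as ASCII (the EDF/BDF header is restricted to ASCII characters), while annotation text keeps UTF-8 or Latin-1 depending on the chosen format. A hedged sketch of what that means for callers (illustrative values only):

```python
# Header fields are str and must be ASCII-encodable.
technician = "Dr. Example"
technician.encode("ascii")              # fine

try:
    "Müller".encode("ascii")            # non-ASCII header text now raises
except UnicodeEncodeError:
    pass

# Annotation text is still encoded according to the file's string format.
description = "Zähne"
utf8_payload = description.encode("utf_8")      # EDF+/BDF+ UTF-8 annotations
latin1_payload = description.encode("latin1")   # legacy Latin-1 annotations
```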
2 changes: 1 addition & 1 deletion pyedflib/highlevel.py
@@ -753,7 +753,7 @@ def drop_channels(
if isinstance(ch,str):
ch_idx = ch_names.index(ch.lower())
to_keep[i] = ch_idx
load_channels = list(to_keep) # copy list compatible with py2.7
load_channels = to_keep.copy()
elif to_drop is not None:
for i,ch in enumerate(to_drop):
if isinstance(ch,str):
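
The `drop_channels` tweak simply swaps the Python 2 friendly `list(to_keep)` for the `list.copy()` method added in Python 3; both return an independent shallow copy. For example (illustrative only):

```python
to_keep = [0, 2, 5]
load_channels = to_keep.copy()      # equivalent to list(to_keep)

load_channels.append(7)             # shallow copy: the original list is untouched
assert to_keep == [0, 2, 5]
assert load_channels == [0, 2, 5, 7]
```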
24 changes: 12 additions & 12 deletions pyedflib/tests/test_edfreader.py
@@ -374,19 +374,19 @@ def test_read_incorrect_file(self):
def test_EdfReader_Legacy_Header_Info(self):
expected_header = {
# Legacy header fields
'patient': b'Legacy patient description',
'recording': b'Legacy recording description',
'patient': 'Legacy patient description',
'recording': 'Legacy recording description',
# All the rest must be empty
'technician': b'',
'recording_additional': b'',
'patientname': b'',
'patient_additional': b'',
'patientcode': b'',
'equipment': b'',
'admincode': b'',
'sex': b'',
'gender': b'', # deprecated
'birthdate': b''
'technician': '',
'recording_additional': '',
'patientname': '',
'patient_additional': '',
'patientcode': '',
'equipment': '',
'admincode': '',
'sex': '',
'gender': '', # deprecated
'birthdate': ''
}

with pyedflib.EdfReader(self.edf_legacy) as f:
10 changes: 5 additions & 5 deletions pyedflib/tests/test_edfwriter.py
Original file line number Diff line number Diff line change
@@ -715,10 +715,10 @@ def test_AnnotationWritingUTF8(self):
np.testing.assert_equal(ann_text[2], "abc")

def test_BytesChars(self):
channel_info = {'label': b'test_label', 'dimension': b'mV', 'sample_frequency': 100,
channel_info = {'label': 'test_label', 'dimension': 'mV', 'sample_frequency': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607, 'digital_min': -8388608,
'prefilter': b' ', 'transducer': b'trans1'}
'prefilter': ' ', 'transducer': 'trans1'}
f = pyedflib.EdfWriter(self.bdfplus_data_file, 1,
file_type=pyedflib.FILETYPE_BDFPLUS)
f.setSignalHeader(0,channel_info)
@@ -727,9 +727,9 @@ def test_BytesChars(self):
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writePhysicalSamples(data)
f.writeAnnotation(1.23, 0.2, b'Zaehne')
f.writeAnnotation(0.25, -1, b'Fuss')
f.writeAnnotation(1.25, 0, b'abc')
f.writeAnnotation(1.23, 0.2, 'Zaehne')
f.writeAnnotation(0.25, -1, 'Fuss')
f.writeAnnotation(1.25, 0, 'abc')

del f

8 changes: 2 additions & 6 deletions setup.py
@@ -66,12 +66,8 @@ def get_numpy_include():
# versions.
# setuptools forgets to unset numpy's setup flag and we get a crippled
# version of it unless we do it ourselves.
try:
import __builtin__ # py2
__builtin__.__NUMPY_SETUP__ = False
except:
import builtins # py3
builtins.__NUMPY_SETUP__ = False
import builtins
builtins.__NUMPY_SETUP__ = False
import numpy as np
except ImportError as e:
try:
20 changes: 10 additions & 10 deletions util/authors.py
@@ -48,7 +48,7 @@ def main():
authors = collections.Counter()

def analyze_line(line, names, disp=False):
line = line.strip().decode('utf-8')
line = line.strip().decode()

# Check the commit author name
m = re.match(u'^@@@([^@]*)@@@', line)
@@ -58,7 +58,7 @@ def analyze_line(line, names, disp=False):
name = NAME_MAP.get(name, name)
if disp:
if name not in names:
stdout_b.write((" - Author: %s\n" % name).encode('utf-8'))
stdout_b.write((" - Author: %s\n" % name).encode())
names.update((name,))

# Look for "thanks to" messages in the commit log
Expand All @@ -67,13 +67,13 @@ def analyze_line(line, names, disp=False):
name = m.group(2)
if name not in (u'this',):
if disp:
stdout_b.write(" - Log : %s\n" % line.strip().encode('utf-8'))
stdout_b.write(" - Log : %s\n" % line.strip().encode())
name = NAME_MAP.get(name, name)
names.update((name,))

line = line[m.end():].strip()
line = re.sub(r'^(and|, and|, ) ', u'Thanks to ', line)
analyze_line(line.encode('utf-8'), names)
analyze_line(line.encode(), names)

# Find all authors before the named range
for line in git.pipe('log', '--pretty=@@@%an@@@%n@@@%cn@@@%n%b',
@@ -108,9 +108,9 @@ def name_key(fullname):
n_authors = list(new_authors)
n_authors.sort(key=name_key)
# Print some empty lines to separate
stdout_b.write(("\n\n").encode('utf-8'))
stdout_b.write(("\n\n").encode())
for author in n_authors:
stdout_b.write(("- %s\n" % author).encode('utf-8'))
stdout_b.write(("- %s\n" % author).encode())
# return for early exit so we only print new authors
return

@@ -133,19 +133,19 @@ def name_key(fullname):
author_clean = author.strip('@')

if author in all_authors:
stdout_b.write((f"* {author_clean} ({count})\n").encode('utf-8'))
stdout_b.write((f"* {author_clean} ({count})\n").encode())
else:
stdout_b.write((f"* {author_clean} ({count}) +\n").encode('utf-8'))
stdout_b.write((f"* {author_clean} ({count}) +\n").encode())

stdout_b.write(("""
A total of %(count)d people contributed to this release.
People with a "+" by their names contributed a patch for the first time.
This list of names is automatically generated, and may not be fully complete.
""" % dict(count=len(authors))).encode('utf-8'))
""" % dict(count=len(authors))).encode())

stdout_b.write(("\nNOTE: Check this list manually! It is automatically generated "
"and some names\n may be missing.\n").encode('utf-8'))
"and some names\n may be missing.\n").encode())


def load_name_map(filename):
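
In `util/authors.py` the explicit `'utf-8'` arguments are dropped because UTF-8 is already the default for both `str.encode()` and `bytes.decode()` in Python 3. A small sketch of the pattern, assuming `stdout_b` is the binary stdout buffer (`sys.stdout.buffer`), as its name suggests:

```python
import sys

stdout_b = sys.stdout.buffer        # assumption: binary stdout, as in authors.py

author = "José Ñoño"                # hypothetical contributor name
line = f"* {author} (3)\n"

# encode() with no arguments is equivalent to encode("utf-8") here.
assert line.encode() == line.encode("utf-8")
stdout_b.write(line.encode())
```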
