diff --git a/.github/workflows/pytest.yml b/.github/workflows/daily_tests.yml similarity index 57% rename from .github/workflows/pytest.yml rename to .github/workflows/daily_tests.yml index caec23d..b1b501e 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/daily_tests.yml @@ -1,25 +1,24 @@ -# This workflow will install Python dependencies and run tests and lint with a single version of Python -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -# Based on David Neuroth pylpg - -name: pytest - -on: - push: - pull_request: - branches: [ master ] -#if [ -f requirements.txt ]; then pip install -r requirements.txt; fi +# These tests are run daily to check incompatibilities introduced by new versions of dependencies +name: Daily tsam tests +on: + schedule: + # * is a special character in YAML so you have to quote this string + # Some Examples for cron syntax https://crontab.guru/examples.html + # Schedules the job daily at midnight (00:00 UTC) + - cron: '0 0 * * *' + # Weekly on Sunday + # - cron: 0 0 * * 0 jobs: - build: + PythonAndOsTest: + name: Test for Python ${{matrix.python-version}} on ${{matrix.os}} runs-on: ${{matrix.os}} strategy: fail-fast: false matrix: os: ["ubuntu-latest","ubuntu-20.04", "macos-latest","macos-13","macos-12", "windows-latest","windows-2019"] # os: ["ubuntu-latest"] - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: [ "3.9", "3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v2 @@ -40,4 +39,5 @@ jobs: working-directory: ./test/ run: | pytest - codecov \ No newline at end of file + codecov + diff --git a/.github/workflows/test_on_push_and_pull.yml b/.github/workflows/test_on_push_and_pull.yml new file mode 100644 index 0000000..78cb189 --- /dev/null +++ b/.github/workflows/test_on_push_and_pull.yml @@ -0,0 +1,71 @@ +# This workflow will install Python dependencies and run tests and lint with a single version of Python +# For more 
information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +# Based on David Neuroth pylpg + +name: Test on Push and Pull + +on: + push: + pull_request: + branches: [ master ] + +jobs: + PythonAndOsTest: + name: Test for Python ${{matrix.python-version}} on ${{matrix.os}} + runs-on: ${{matrix.os}} + strategy: + fail-fast: false + matrix: + os: ["ubuntu-latest","ubuntu-20.04", "macos-latest","macos-13","macos-12", "windows-latest","windows-2019"] + python-version: [ "3.9", "3.10", "3.11", "3.12"] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{matrix.python-version}} + uses: actions/setup-python@v2 + with: + python-version: ${{matrix.python-version}} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest + pip install pytest-cov + pip install codecov + pip install -r requirements.txt + pip install --no-cache-dir -e . + + - name: Test with pytest + working-directory: ./test/ + run: | + pytest + codecov + + NumpyTest: + name: Test for numpy ${{matrix.python-numpy-version.numpy}} and python ${{matrix.python-numpy-version.python}} + runs-on: ${{matrix.os}} + strategy: + fail-fast: false + matrix: + os: ["ubuntu-latest","ubuntu-20.04", "macos-latest","macos-13","macos-12", "windows-latest","windows-2019"] + python-numpy-version: [ {python : 3.9,numpy : 1.25}, {python : 3.9,numpy : 1.26},{python : 3.9,numpy : 2.0}] + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{matrix.python-numpy-version.python}} + uses: actions/setup-python@v2 + with: + python-version: ${{matrix.python-numpy-version.python}} + - name: Install dependencies + run: | + python -m pip install numpy==${{matrix.python-numpy-version.numpy}} --upgrade pip + pip install pytest + pip install pytest-cov + pip install codecov + pip install -r requirements.txt + pip install --no-cache-dir -e . 
+ + - name: Test with pytest + working-directory: ./test/ + run: | + pytest + codecov \ No newline at end of file diff --git a/Makefile b/Makefile index f1006b5..bcb85d9 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,21 @@ +#!make +LOCAL_VENVS_DIR=~/.virtualenvs +PROJECT_NAME=tsam +PYTHON=python3.11 +LOCAL_VENV_DIR := ${LOCAL_VENVS_DIR}/${PROJECT_NAME} + + test: - pytest + @( \ + source ${LOCAL_VENV_DIR}/bin/activate; \ + pytest + ) -sdist : - python setup.py sdist +sdist: + @( \ + source ${LOCAL_VENV_DIR}/bin/activate; \ + ${PYTHON} setup.py sdist + ) upload: twine upload dist/* @@ -12,3 +25,9 @@ clean: dist: sdist upload clean + + +setup_venv: + mkdir -p ${LOCAL_VENVS_DIR} + ${PYTHON} -m venv ${LOCAL_VENV_DIR} + . ${LOCAL_VENV_DIR}/bin/activate; pip install -r requirements.txt; pip install -e . diff --git a/README.md b/README.md index 9bf87d2..e7275d2 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![pytest master status](https://github.com/FZJ-IEK3-VSA/tsam/actions/workflows/pytest.yml/badge.svg?branch=master)](https://github.com/FZJ-IEK3-VSA/tsam/actions) [![Version](https://img.shields.io/pypi/v/tsam.svg)](https://pypi.python.org/pypi/tsam) [![Documentation Status](https://readthedocs.org/projects/tsam/badge/?version=latest)](https://tsam.readthedocs.io/en/latest/) [![PyPI - License](https://img.shields.io/pypi/l/tsam)]((https://github.com/FZJ-IEK3-VSA/tsam/blob/master/LICENSE.txt)) [![codecov](https://codecov.io/gh/FZJ-IEK3-VSA/tsam/branch/master/graph/badge.svg)](https://codecov.io/gh/FZJ-IEK3-VSA/tsam) +[![pytest master status](https://github.com/FZJ-IEK3-VSA/tsam/actions/workflows/pytest.yml/badge.svg?branch=master)](https://github.com/FZJ-IEK3-VSA/tsam/actions) [![Version](https://img.shields.io/pypi/v/tsam.svg)](https://pypi.python.org/pypi/tsam) [![Conda Version](https://img.shields.io/conda/vn/conda-forge/tsam.svg)](https://anaconda.org/conda-forge/tsam) [![Documentation 
Status](https://readthedocs.org/projects/tsam/badge/?version=latest)](https://tsam.readthedocs.io/en/latest/) [![PyPI - License](https://img.shields.io/pypi/l/tsam)]((https://github.com/FZJ-IEK3-VSA/tsam/blob/master/LICENSE.txt)) [![codecov](https://codecov.io/gh/FZJ-IEK3-VSA/tsam/branch/master/graph/badge.svg)](https://codecov.io/gh/FZJ-IEK3-VSA/tsam) [![badge](https://img.shields.io/badge/launch-binder-579aca.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGd
uJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC)](https://mybinder.org/v2/gh/FZJ-IEK3-VSA/voila-tsam/HEAD?urlpath=voila/render/Time-Series-Aggregation-Module.ipynb) Forschungszentrum Juelich Logo @@ -19,10 +19,14 @@ The documentation of the tsam code can be found [**here**](https://tsam.readthed ## Installation -Directly install via pip as follows: +Directly install via pip from pypi as follows: pip install tsam +of install from conda forge with the following command: + + conda install tsam -c conda-forge + Alternatively, clone a local copy of the repository to your computer git clone https://github.com/FZJ-IEK3-VSA/tsam.git @@ -37,6 +41,12 @@ Or install directly via python as python setup.py install In order to use the k-medoids clustering, make sure that you have installed a MILP solver. As default [HiGHS](https://github.com/ERGO-Code/HiGHS) is used. Nevertheless, in case you have access to a license we recommend commercial solvers (e.g. Gurobi or CPLEX) since they have a better performance. 
+ +### Developer installation + +In order to set up a virtual environment in Linux, correct the Python interpreter name in the Makefile and call + + make setup_venv ## Examples diff --git a/requirements.txt b/requirements.txt index fbbb92c..499af2c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ scikit-learn>=0.0 -pandas>=0.18.1,<3.0 -numpy>=1.11.0,<2.0 +pandas>=2.0.3 +numpy>=1.20.0 pyomo>=6.4.3 networkx tqdm diff --git a/requirements.yml b/requirements.yml index 369ce6d..19ebf38 100644 --- a/requirements.yml +++ b/requirements.yml @@ -2,7 +2,7 @@ name: tsam channels: - conda-forge dependencies: - - python>=3.8,<3.13 + - python>=3.9,<3.13 - pip - pip: - -r requirements.txt \ No newline at end of file diff --git a/requirements_dev.txt b/requirements_dev.txt index e844f99..4604eb9 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -2,9 +2,10 @@ # Testing pytest -sphinx +pytest-cov # Documentation +sphinx sphinx-autobuild sphinx_book_theme diff --git a/requirements_dev.yml b/requirements_dev.yml index 26b54c0..cd66f3e 100644 --- a/requirements_dev.yml +++ b/requirements_dev.yml @@ -4,5 +4,8 @@ channels: - conda-forge dependencies: - - pytest - - pytest-cov + - python>=3.9,<3.13 + - pip + - pip: + - -r requirements_dev.txt + diff --git a/setup.py b/setup.py index b071f1a..c40ead7 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ setuptools.setup( name="tsam", - version="2.3.2", + version="2.3.3", author="Leander Kotzur, Maximilian Hoffmann", author_email="leander.kotzur@googlemail.com, maximilian.hoffmann@julumni.fz-juelich.de", description="Time series aggregation module (tsam) to create typical periods", @@ -16,6 +16,7 @@ long_description_content_type="text/markdown", url="https://github.com/FZJ-IEK3-VSA/tsam", include_package_data=True, + python_requires='>=3.9', packages=setuptools.find_packages(), install_requires=required_packages, setup_requires=["setuptools-git"], @@ -28,11 +29,7 @@ "Operating System :: OS Independent", "Programming 
Language :: Python", "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.4", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", "Topic :: Scientific/Engineering :: Mathematics", "Topic :: Software Development :: Libraries :: Python Modules", ], diff --git a/test/test_assert_raises.py b/test/test_assert_raises.py index 6b6b0b5..af5dee7 100644 --- a/test/test_assert_raises.py +++ b/test/test_assert_raises.py @@ -221,7 +221,7 @@ def test_assert_raises(): # check erroneous dataframe containing NaN values rawNan = copy.deepcopy((raw)) - rawNan.iloc[10, :] = np.NaN + rawNan.iloc[10, :] = np.nan aggregation = tsam.TimeSeriesAggregation(timeSeries=rawNan) np.testing.assert_raises_regex( ValueError, diff --git a/test/test_cluster_order.py b/test/test_cluster_order.py index 0948755..32e1105 100644 --- a/test/test_cluster_order.py +++ b/test/test_cluster_order.py @@ -103,21 +103,21 @@ def test_cluster_order(): orig_raw_predefClusterOrder[typPeriods_predefClusterOrder.columns] .unstack() .loc[sortedDaysOrig1, :] - .stack() + .stack(future_stack=True,) ) - test1 = typPeriods_predefClusterOrder.unstack().loc[sortedDaysTest1, :].stack() + test1 = typPeriods_predefClusterOrder.unstack().loc[sortedDaysTest1, :].stack(future_stack=True,) orig2 = ( orig_raw_predefClusterOrderAndClusterCenters[ typPeriods_predefClusterOrderAndClusterCenters.columns ] .unstack() .loc[sortedDaysOrig2, :] - .stack() + .stack(future_stack=True,) ) test2 = ( typPeriods_predefClusterOrderAndClusterCenters.unstack() .loc[sortedDaysTest2, :] - .stack() + .stack(future_stack=True,) ) np.testing.assert_array_almost_equal( diff --git a/test/test_hierarchical.py b/test/test_hierarchical.py index ac74406..ec37616 100644 --- a/test/test_hierarchical.py +++ b/test/test_hierarchical.py @@ -46,8 +46,8 @@ def test_hierarchical(): sortedDaysTest = 
typPeriods.groupby(level=0).sum().sort_values("GHI").index # rearange their order - orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig, :].stack() - test = typPeriods.unstack().loc[sortedDaysTest, :].stack() + orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig, :].stack(future_stack=True,) + test = typPeriods.unstack().loc[sortedDaysTest, :].stack(future_stack=True,) np.testing.assert_array_almost_equal(orig.values, test.values, decimal=4) diff --git a/test/test_hypertuneAggregation.py b/test/test_hypertuneAggregation.py index 24cb703..32d29af 100644 --- a/test/test_hypertuneAggregation.py +++ b/test/test_hypertuneAggregation.py @@ -1,5 +1,6 @@ import os import time +import pytest import pandas as pd import numpy as np @@ -74,7 +75,7 @@ def test_optimalPair(): assert windPeriods * windSegments <= len(raw["Wind"])*datareduction assert windPeriods * windSegments >= len(raw["Wind"])*datareduction * 0.8 - +@pytest.mark.skip(reason="This test is too slow") def test_steepest_gradient_leads_to_optima(): """ Based on the hint of Eva Simarik, check if the RMSE is for the optimized combination @@ -147,7 +148,7 @@ def test_paretoOptimalAggregation(): raw, hoursPerPeriod=12, clusterMethod="hierarchical", - representationMethod="durationRepresentation", + representationMethod="meanRepresentation", distributionPeriodWise=False, rescaleClusterPeriods=False, segmentation=True, diff --git a/test/test_k_medoids.py b/test/test_k_medoids.py index b373dbf..eca96ea 100644 --- a/test/test_k_medoids.py +++ b/test/test_k_medoids.py @@ -43,8 +43,8 @@ def test_k_medoids(): sortedDaysTest = typPeriods.groupby(level=0).sum().sort_values("GHI").index # rearange their order - orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig, :].stack() - test = typPeriods.unstack().loc[sortedDaysTest, :].stack() + orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig, :].stack(future_stack=True,) + test = typPeriods.unstack().loc[sortedDaysTest, 
:].stack(future_stack=True,) np.testing.assert_array_almost_equal(orig.values, test.values, decimal=4) diff --git a/test/test_segmentation.py b/test/test_segmentation.py index e7c2521..9ca5676 100644 --- a/test/test_segmentation.py +++ b/test/test_segmentation.py @@ -46,8 +46,8 @@ def test_segmentation(): sortedDaysTest = typPeriods.groupby(level=0).sum().sort_values("GHI").index # rearange their order - orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig, :].stack() - test = typPeriods.unstack().loc[sortedDaysTest, :].stack() + orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig, :].stack(future_stack=True,) + test = typPeriods.unstack().loc[sortedDaysTest, :].stack(future_stack=True,) np.testing.assert_array_almost_equal(orig.values, test.values, decimal=4) diff --git a/tsam/__init__.py b/tsam/__init__.py index 85b98a3..e69de29 100644 --- a/tsam/__init__.py +++ b/tsam/__init__.py @@ -1,11 +0,0 @@ -import sys - -if not sys.warnoptions: - import warnings - - warnings.filterwarnings( - action="ignore", - category=FutureWarning, - append=True, - message=r".*The previous implementation of stack is deprecated and will be removed in a future version of pandas.*", - ) \ No newline at end of file diff --git a/tsam/timeseriesaggregation.py b/tsam/timeseriesaggregation.py index 9687fc2..4746d58 100644 --- a/tsam/timeseriesaggregation.py +++ b/tsam/timeseriesaggregation.py @@ -8,7 +8,6 @@ import numpy as np from sklearn.metrics import mean_squared_error, mean_absolute_error -from sklearn.metrics.pairwise import euclidean_distances from sklearn import preprocessing from tsam.periodAggregation import aggregatePeriods @@ -27,6 +26,9 @@ MIN_WEIGHT = 1e-6 + + + def unstackToPeriods(timeSeries, timeStepsPerPeriod): """ Extend the timeseries to an integer multiple of the period length and @@ -388,21 +390,21 @@ def _check_init_args(self): try: timedelta = self.timeSeries.index[1] - self.timeSeries.index[0] self.resolution = float(timedelta.total_seconds()) 
/ 3600 - except AttributeError: + except AttributeError as exc: raise ValueError( "'resolution' argument has to be nonnegative float or int" + " or the given timeseries needs a datetime index" - ) + ) from exc except TypeError: try: self.timeSeries.index = pd.to_datetime(self.timeSeries.index) timedelta = self.timeSeries.index[1] - self.timeSeries.index[0] self.resolution = float(timedelta.total_seconds()) / 3600 - except: + except Exception as exc: raise ValueError( "'resolution' argument has to be nonnegative float or int" + " or the given timeseries needs a datetime index" - ) + ) from exc if not (isinstance(self.resolution, int) or isinstance(self.resolution, float)): raise ValueError("resolution has to be nonnegative float or int") @@ -870,9 +872,9 @@ def _rescaleClusterPeriods(self, clusterOrder, clusterPeriods, extremeClusterIdx ) # reset values higher than the upper sacle or less than zero - typicalPeriods[column].clip(lower=0, upper=scale_ub, inplace=True) + typicalPeriods[column] = typicalPeriods[column].clip(lower=0, upper=scale_ub) - typicalPeriods[column].fillna(0.0, inplace=True) + typicalPeriods[column] = typicalPeriods[column].fillna(0.0) # calc new sum and new diff to orig data sum_clu_wo_peak = np.sum( @@ -967,7 +969,7 @@ def createTypicalPeriods(self): # check for additional cluster parameters if self.evalSumPeriods: evaluationValues = ( - self.normalizedPeriodlyProfiles.stack(level=0) + self.normalizedPeriodlyProfiles.stack(future_stack=True,level=0) .sum(axis=1) .unstack(level=1) ) @@ -1237,7 +1239,7 @@ def predictOriginalData(self): columns=self.normalizedPeriodlyProfiles.columns, index=self.normalizedPeriodlyProfiles.index, ) - clustered_data_df = clustered_data_df.stack(level="TimeStep") + clustered_data_df = clustered_data_df.stack(future_stack=True,level="TimeStep") # back in form self.normalizedPredictedData = pd.DataFrame( diff --git a/tsam/utils/durationRepresentation.py b/tsam/utils/durationRepresentation.py index 155efdb..91be97f 
100644 --- a/tsam/utils/durationRepresentation.py +++ b/tsam/utils/durationRepresentation.py @@ -57,7 +57,7 @@ def durationRepresentation( # get all the values of a certain attribute and cluster candidateValues = candidates.loc[indice[0], a] # sort all values - sortedAttr = candidateValues.stack().sort_values() + sortedAttr = candidateValues.stack(future_stack=True,).sort_values() # reindex and arrange such that every sorted segment gets represented by its mean sortedAttr.index = pd.MultiIndex.from_tuples(clean_index) representationValues = sortedAttr.unstack(level=0).mean(axis=1) @@ -97,8 +97,8 @@ def durationRepresentation( # concat centroid values and cluster weights for all clusters meansAndWeights = pd.concat( [ - pd.DataFrame(np.array(meanVals)).stack(), - pd.DataFrame(np.array(clusterLengths)).stack(), + pd.DataFrame(np.array(meanVals)).stack(future_stack=True,), + pd.DataFrame(np.array(clusterLengths)).stack(future_stack=True,), ], axis=1, ) @@ -107,7 +107,7 @@ def durationRepresentation( # save order of the sorted centroid values across all clusters order = meansAndWeightsSorted.index # sort all values of the original time series - sortedAttr = candidates.loc[:, a].stack().sort_values().values + sortedAttr = candidates.loc[:, a].stack(future_stack=True,).sort_values().values # take mean of sections of the original duration curve according to the cluster and its weight the # respective section is assigned to representationValues = [] diff --git a/tsam/utils/k_medoids_contiguity.py b/tsam/utils/k_medoids_contiguity.py index 7228f51..c45310b 100644 --- a/tsam/utils/k_medoids_contiguity.py +++ b/tsam/utils/k_medoids_contiguity.py @@ -1,7 +1,14 @@ # -*- coding: utf-8 -*- import numpy as np + + import time + +# switch to numpy 2.0 +np.float_ = np.float64 +np.complex_=np.complex128 + import pyomo.environ as pyomo import pyomo.opt as opt import networkx as nx diff --git a/tsam/utils/k_medoids_exact.py b/tsam/utils/k_medoids_exact.py index 3ca7a65..7ff2aca 100644 
--- a/tsam/utils/k_medoids_exact.py +++ b/tsam/utils/k_medoids_exact.py @@ -5,6 +5,11 @@ from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS from sklearn.utils import check_array + +# switch to numpy 2.0 +np.float_ = np.float64 +np.complex_=np.complex128 + import pyomo.environ as pyomo import pyomo.opt as opt from pyomo.contrib import appsi