enable python 3.10 in build matrix (#167)
* enable python 3.10 in build matrix

* nox only run doctests if torch available
clonker authored Nov 15, 2021
1 parent a99fafb commit 7745c96
Showing 14 changed files with 53 additions and 52 deletions.
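Most of the C++ changes below replace the bare `ssize_t` (and the fully qualified `pybind11::ssize_t`) with the `py::ssize_t` alias when indexing pybind11 arrays; bare `ssize_t` is a POSIX type that is not guaranteed to exist on every compiler (notably MSVC), whereas `py::ssize_t` comes from pybind11 itself and matches the signed type returned by `shape()`. A minimal self-contained sketch of the pattern, with a hypothetical module name and assuming the conventional `namespace py = pybind11;` alias (this is not deeptime code):

```cpp
// Sketch of the indexing pattern used throughout this commit (hypothetical module,
// not deeptime code). Assumes the conventional alias `namespace py = pybind11;`.
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>

namespace py = pybind11;

// Sum a 1-D float64 NumPy array. py::ssize_t is the signed index type pybind11
// uses for shapes, so the loop variable matches buf.shape(0) without warnings.
double sumArray(const py::array_t<double> &arr) {
    auto buf = arr.unchecked<1>();
    double total = 0.0;
    for (py::ssize_t i = 0; i < buf.shape(0); ++i) {
        total += buf(i);
    }
    return total;
}

PYBIND11_MODULE(example_bindings, m) {
    m.def("sum_array", &sumArray, "Sum the entries of a 1-D float64 array (illustration only)");
}
```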
12 changes: 6 additions & 6 deletions deeptime/basis/src/basis_bindings.cpp
@@ -92,7 +92,7 @@ np_array<std::int32_t> computePowerMatrix(std::size_t stateSpaceDim, std::size_t
}

template<typename dtype>
np_array<dtype> evaluateMonomials(ssize_t p, const np_array_nfc<dtype> &xArr,
np_array<dtype> evaluateMonomials(py::ssize_t p, const np_array_nfc<dtype> &xArr,
const np_array<std::int32_t> &powerMatrixArr) {
auto x = xArr.template unchecked<2>();
auto stateSpaceDim = x.shape(0);
@@ -106,10 +106,10 @@ np_array<dtype> evaluateMonomials(ssize_t p, const np_array_nfc<dtype> &xArr,
std::fill(outArr.mutable_data(), outArr.mutable_data() + outArr.size(), static_cast<dtype>(1));
auto out = outArr.template mutable_unchecked<2>();

for (ssize_t i = 0; i < nMonomials; ++i) {
for (ssize_t j = 0; j < stateSpaceDim; ++j) {
for (py::ssize_t i = 0; i < nMonomials; ++i) {
for (py::ssize_t j = 0; j < stateSpaceDim; ++j) {
auto power = powerMatrix(stateSpaceDim - 1 - j, i);
for (ssize_t k = 0; k < nTestPoints; ++k) {
for (py::ssize_t k = 0; k < nTestPoints; ++k) {
out(i, k) *= std::pow(x(j, k), power);
}
}
@@ -131,9 +131,9 @@ PYBIND11_MODULE(_basis_bindings, m) {
std::vector<std::string> out;
out.reserve(nMonomials);

for (ssize_t i = 0; i < nMonomials; ++i) {
for (py::ssize_t i = 0; i < nMonomials; ++i) {
std::string monFeature {};
for (ssize_t j = 0; j < stateSpaceDim; ++j) {
for (py::ssize_t j = 0; j < stateSpaceDim; ++j) {
auto power = powerMatrix.at(stateSpaceDim - 1 - j, i);

if (power != 0) {
16 changes: 8 additions & 8 deletions deeptime/clustering/include/bits/kmeans_bits.h
@@ -43,7 +43,7 @@ inline std::tuple<np_array<T>, np_array<int>> cluster(const np_array_nfc<T> &np_

/* do the clustering */
if (n_threads == 0) {
for (pybind11::ssize_t i = 0; i < n_frames; ++i) {
for (py::ssize_t i = 0; i < n_frames; ++i) {
int argMinDist = 0;
{
T minDist = Metric::template compute(&chunk(i, 0), &centers(0, 0), dim);
@@ -59,7 +59,7 @@ inline std::tuple<np_array<T>, np_array<int>> cluster(const np_array_nfc<T> &np_
{
assignmentsPtr[i] = argMinDist;
centers_counter.at(argMinDist)++;
for (pybind11::ssize_t j = 0; j < dim; j++) {
for (py::ssize_t j = 0; j < dim; j++) {
newCentersRef(argMinDist, j) += chunk(i, j);
}
}
@@ -69,7 +69,7 @@ inline std::tuple<np_array<T>, np_array<int>> cluster(const np_array_nfc<T> &np_
omp_set_num_threads(n_threads);

#pragma omp parallel for schedule(static, 1)
for (pybind11::ssize_t i = 0; i < n_frames; ++i) {
for (py::ssize_t i = 0; i < n_frames; ++i) {
std::vector<T> dists(n_centers);
for (std::size_t j = 0; j < n_centers; ++j) {
dists[j] = Metric::template compute(&chunk(i, 0), &centers(j, 0), dim);
@@ -82,7 +82,7 @@ inline std::tuple<np_array<T>, np_array<int>> cluster(const np_array_nfc<T> &np_
{
assignmentsPtr[i] = argMinDist;
centers_counter.at(static_cast<std::size_t>(argMinDist))++;
for (pybind11::ssize_t j = 0; j < dim; j++) {
for (py::ssize_t j = 0; j < dim; j++) {
newCentersRef(argMinDist, j) += chunk(i, j);
}
}
@@ -115,7 +115,7 @@ inline std::tuple<np_array<T>, np_array<int>> cluster(const np_array_nfc<T> &np_
std::unique_lock<std::mutex> lock(m);
assignmentsPtr[i] = argMinDist;
centers_counter.at(argMinDist)++;
for (pybind11::ssize_t j = 0; j < dim; j++) {
for (py::ssize_t j = 0; j < dim; j++) {
newCentersRef(argMinDist, j) += chunk(i, j);
}
}
@@ -133,11 +133,11 @@ inline std::tuple<np_array<T>, np_array<int>> cluster(const np_array_nfc<T> &np_
auto centers_counter_it = centers_counter.begin();
for (std::size_t i = 0; i < n_centers; ++i, ++centers_counter_it) {
if (*centers_counter_it == 0) {
for (pybind11::ssize_t j = 0; j < dim; ++j) {
for (py::ssize_t j = 0; j < dim; ++j) {
newCentersRef(i, j) = centers(i, j);
}
} else {
for (pybind11::ssize_t j = 0; j < dim; ++j) {
for (py::ssize_t j = 0; j < dim; ++j) {
newCentersRef(i, j) /= static_cast<T>(*centers_counter_it);
}
}
@@ -180,7 +180,7 @@ inline std::tuple<np_array_nfc<T>, int, int, np_array<T>> cluster_loop(
it += 1;
} while (it < max_iter && !converged);
int res = converged ? 0 : 1;
np_array<T> npInertias({static_cast<pybind11::ssize_t>(inertias.size())});
np_array<T> npInertias({static_cast<py::ssize_t>(inertias.size())});
std::copy(inertias.begin(), inertias.end(), npInertias.mutable_data());
return std::make_tuple(currentCenters, res, it, npInertias);
}
20 changes: 10 additions & 10 deletions deeptime/markov/hmm/_bindings/include/OutputModelUtils.h
@@ -52,19 +52,19 @@ np_array<std::int64_t> generateObservationTrajectory(const np_array_nfc<State> &
auto nThreads = std::thread::hardware_concurrency();
std::vector<deeptime::thread::scoped_thread> threads;
threads.reserve(nThreads);
auto grainSize = std::max(static_cast<pybind11::ssize_t>(1), nTimesteps / nThreads);
auto grainSize = std::max(static_cast<py::ssize_t>(1), nTimesteps / nThreads);

const auto* hiddenStateTrajectoryBuf = hiddenStateTrajectory.data();
const auto* outputProbabilitiesBuf = outputProbabilities.data();

for(pybind11::ssize_t nextIndex = 0; nextIndex < nTimesteps; nextIndex += grainSize) {
for(py::ssize_t nextIndex = 0; nextIndex < nTimesteps; nextIndex += grainSize) {
auto beginIndex = nextIndex;
auto endIndex = std::min(nextIndex+grainSize, nTimesteps);
threads.emplace_back([hiddenStateTrajectoryBuf, outputProbabilitiesBuf,
beginIndex, endIndex, outputPtr, nObs]{
auto generator = deeptime::rnd::randomlySeededGenerator();
std::discrete_distribution<> ddist;
for(pybind11::ssize_t t = beginIndex; t < endIndex; ++t) {
for(py::ssize_t t = beginIndex; t < endIndex; ++t) {
auto state = hiddenStateTrajectoryBuf[t];
auto begin = outputProbabilitiesBuf + state * nObs; // outputProbabilities.at(state, 0)
auto end = outputProbabilitiesBuf + (state+1) * nObs; // outputProbabilities.at(state+1, 0)
@@ -94,7 +94,7 @@ np_array<dtype> toOutputProbabilityTrajectory(const np_array_nfc<State> &observa
auto T = observations.shape(0);

#pragma omp parallel for collapse(2) default(none) firstprivate(P, obs, nHidden, nObs, T, outputPtr)
for (ssize_t t = 0; t < T; ++t) {
for (py::ssize_t t = 0; t < T; ++t) {
for (std::size_t i = 0; i < nHidden; ++i) {
outputPtr[t*nHidden + i] = P[obs[t] + i*nObs];
}
@@ -107,7 +107,7 @@ template<typename dtype, typename State>
void sample(const std::vector<np_array_nfc<State>> &observationsPerState, np_array_nfc<dtype> &outputProbabilities,
const np_array_nfc<dtype> &prior) {
auto nObs = outputProbabilities.shape(1);
ssize_t currentState{0};
py::ssize_t currentState{0};

auto& generator = deeptime::rnd::staticThreadLocalGenerator();
deeptime::rnd::dirichlet_distribution<dtype> dirichlet;
@@ -126,7 +126,7 @@ void sample(const std::vector<np_array_nfc<State>> &observationsPerState, np_arr
std::vector<dtype> histPrivate(nObs, 0);

#pragma omp for
for(ssize_t i = 0; i < T; ++i) {
for(py::ssize_t i = 0; i < T; ++i) {
++histPrivate.at(observationsBuf[i]);
}

@@ -140,7 +140,7 @@ void sample(const std::vector<np_array_nfc<State>> &observationsPerState, np_arr

#else

for (ssize_t i = 0; i < observations.size(); ++i) {
for (py::ssize_t i = 0; i < observations.size(); ++i) {
++hist.at(observations.at(i));
}

@@ -228,7 +228,7 @@ np_array<dtype> pO(dtype o, const np_array_nfc<dtype> &mus, const np_array_nfc<d
auto sigmasBuf = sigmas.data();

#pragma omp parallel for
for (pybind11::ssize_t i = 0; i < N; ++i) {
for (py::ssize_t i = 0; i < N; ++i) {
pBuf[i] = sample(o, musBuf[i], sigmasBuf[i]);
}

@@ -306,7 +306,7 @@ std::tuple<np_array<dtype>, np_array<dtype>> fit(std::size_t nHiddenStates, cons
for (decltype(nHiddenStates) i = 0; i < nHiddenStates; ++i) {
dtype dot = 0;
dtype wStateSum = 0;
for (ssize_t t = 0; t < obs.shape(0); ++t) {
for (py::ssize_t t = 0; t < obs.shape(0); ++t) {
dot += w.at(t, i) * obsPtr[t];
wStateSum += w.at(t, i);
}
Expand Down Expand Up @@ -335,7 +335,7 @@ std::tuple<np_array<dtype>, np_array<dtype>> fit(std::size_t nHiddenStates, cons
for (decltype(nHiddenStates) i = 0; i < nHiddenStates; ++i) {
dtype wStateSum = 0;
dtype sigmaUpdate = 0;
for (ssize_t t = 0; t < obs.shape(0); ++t) {
for (py::ssize_t t = 0; t < obs.shape(0); ++t) {
auto sqrty = static_cast<dtype>(obsPtr[t]) - static_cast<dtype>(means.at(i));
sigmaUpdate += w.at(t, i) * sqrty*sqrty;
wStateSum += w.at(t, i);
Original file line number Diff line number Diff line change
@@ -34,8 +34,8 @@ class RevPiSampler {
auto b = arrB.template unchecked<1>();
auto X = arrX.template mutable_unchecked<2>();

for (ssize_t k = 0; k < M; ++k) {
for (ssize_t l = 0; l < k; ++l) {
for (py::ssize_t k = 0; k < M; ++k) {
for (py::ssize_t l = 0; l < k; ++l) {
if (C(k, l) + C(l, k) > 0) {
auto xkl = X(k, l);
auto xkl_new = sample_quad(X(k, l), X(k, k), X(l, l),
Original file line number Diff line number Diff line change
@@ -5,7 +5,7 @@
#include "common.h"

template<typename dtype>
void convertImpl(ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<dtype> &yArr,
void convertImpl(py::ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<dtype> &yArr,
const np_array_nfc<dtype> &dataArr, np_array_nfc<dtype> &nuArr,
np_array_nfc<dtype> &dataPArr, np_array_nfc<dtype> &diagPArr,
const np_array<std::int32_t> &indicesArr, const np_array<std::int32_t> &indptrArr) {
@@ -22,7 +22,7 @@ void convertImpl(ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<
auto nu = nuArr.template mutable_unchecked<1>();

// Loop over rows of Cs
for (ssize_t k = 0; k < M; ++k) {
for (py::ssize_t k = 0; k < M; ++k) {
nu(k) = std::exp(y(k));
// Loop over nonzero entries in row of Cs
for (std::int32_t l = indptr(k); l < indptr(k + 1); ++l) {
@@ -46,7 +46,7 @@ void convertImpl(ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<
}

template<typename dtype>
void FImpl(ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<dtype> &yArr,
void FImpl(py::ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<dtype> &yArr,
const np_array_nfc<dtype> &cArr, const np_array_nfc<dtype> &dataArr,
np_array_nfc<dtype> &FvalArr,
const np_array<std::int32_t> &indicesArr, const np_array<std::int32_t> &indptrArr) {
@@ -61,7 +61,7 @@ void FImpl(ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<dtype>
auto Fval = FvalArr.template mutable_unchecked<1>();

// Loop over rows of Cs
for (ssize_t k = 0; k < M; ++k) {
for (py::ssize_t k = 0; k < M; ++k) {
Fval(k) += 1.0;
Fval(k + M) -= c(k);

@@ -83,7 +83,7 @@ void FImpl(ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<dtype>
}

template<typename dtype>
void dfImpl(ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<dtype> &yArr,
void dfImpl(py::ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<dtype> &yArr,
const np_array_nfc<dtype> &dataArr,
np_array_nfc<dtype> &dataHxxArr, np_array_nfc<dtype> &dataHyyArr, np_array_nfc<dtype> &dataHyxArr,
np_array_nfc<dtype> &diagDxxArr, np_array_nfc<dtype> &diagDyyArr, np_array_nfc<dtype> &diagDyxArr,
@@ -104,7 +104,7 @@ void dfImpl(ssize_t M, const np_array_nfc<dtype> &xArr, const np_array_nfc<dtype


// Loop over rows of Cs
for (ssize_t k = 0; k < M; ++k) {
for (py::ssize_t k = 0; k < M; ++k) {
// Loop over nonzero entries in row of Cs
for (std::int32_t l = indptr(k); l < indptr(k + 1); ++l) {
// Column index of current element
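The convertImpl, FImpl, and dfImpl loops above all walk a sparse matrix stored in CSR (compressed sparse row) form: for row `k`, the range `indptr(k)` to `indptr(k + 1)` delimits its stored nonzeros, and `indices(l)` gives the column of the `l`-th stored value in `data`. A small standalone illustration of that access pattern, using made-up data rather than deeptime's API:

```cpp
// Standalone CSR (indptr/indices/data) walk mirroring the loops above.
// The matrix below is made up purely for illustration.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // 3x3 sparse matrix:
    //   [1 0 2]
    //   [0 0 3]
    //   [4 5 0]
    std::vector<double> data{1, 2, 3, 4, 5};
    std::vector<std::int32_t> indices{0, 2, 2, 0, 1};  // column of each stored value
    std::vector<std::int32_t> indptr{0, 2, 3, 5};      // row k owns entries [indptr[k], indptr[k+1])

    const std::int32_t M = 3;
    for (std::int32_t k = 0; k < M; ++k) {                          // loop over rows
        for (std::int32_t l = indptr[k]; l < indptr[k + 1]; ++l) {  // nonzero entries in row k
            std::cout << "(" << k << ", " << indices[l] << ") = " << data[l] << "\n";
        }
    }
    return 0;
}
```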
8 changes: 4 additions & 4 deletions deeptime/markov/tools/kahandot/kahandot_module.cpp
@@ -8,7 +8,7 @@ template<typename dtype>
auto ksum(const dtype* const begin, const dtype* const end) -> dtype {
auto n = std::distance(begin, end);
dtype sum {0};
ssize_t o {0};
py::ssize_t o {0};
dtype correction {0};

while (n--) {
@@ -44,11 +44,11 @@ auto kdot(const np_array_nfc<dtype> &arrA, const np_array_nfc<dtype> &arrB) -> n

auto C = Carr.template mutable_unchecked<2>();

for (ssize_t i = 0; i < n; ++i) {
for (ssize_t j = 0; j < l; ++j) {
for (py::ssize_t i = 0; i < n; ++i) {
for (py::ssize_t j = 0; j < l; ++j) {
dtype err{0};
dtype sum{0};
for (ssize_t k = 0; k < m; ++k) {
for (py::ssize_t k = 0; k < m; ++k) {
auto y = A(i, k) * B(k, j) - err;
auto t = sum + y;
err = (t - sum) - y;
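kdot above accumulates each output entry with a compensated (Kahan) sum, which is the `err` / `y` / `t` bookkeeping in its inner loop; ksum applies the same idea to a flat range, although its loop body is cut off in this view. For reference, a minimal standalone sketch of Kahan summation (an illustration, not the module's exact code):

```cpp
// Minimal Kahan (compensated) summation: a running correction term feeds the
// low-order bits lost in each addition back into the next one.
#include <iostream>
#include <vector>

double kahanSum(const std::vector<double> &values) {
    double sum = 0.0;
    double correction = 0.0;           // compensation for lost low-order bits
    for (double v : values) {
        double y = v - correction;     // apply the previous correction
        double t = sum + y;            // low-order bits of y may be lost here
        correction = (t - sum) - y;    // recover what was just lost
        sum = t;
    }
    return sum;
}

int main() {
    std::vector<double> values(1000000, 0.1);
    // Close to 100000, with far less rounding drift than naive accumulation.
    std::cout.precision(17);
    std::cout << kahanSum(values) << "\n";
    return 0;
}
```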
2 changes: 2 additions & 0 deletions devtools/azure-pipelines-linux.yml
@@ -12,6 +12,8 @@ jobs:
python.version: '3.8'
Python39:
python.version: '3.9'
Python310:
python.version: '3.10'

maxParallel: 10

2 changes: 2 additions & 0 deletions devtools/azure-pipelines-osx.yml
@@ -13,6 +13,8 @@ jobs:
python.version: '3.8'
Python39:
python.version: '3.9'
Python310:
python.version: '3.10'

maxParallel: 10

2 changes: 2 additions & 0 deletions devtools/azure-pipelines-win.yml
@@ -12,6 +12,8 @@ jobs:
python.version: '3.8'
Python39:
python.version: '3.9'
Python310:
python.version: '3.10'

maxParallel: 10

8 changes: 0 additions & 8 deletions devtools/conda-recipe/meta.yaml
@@ -36,11 +36,3 @@ requirements:
test:
imports:
- deeptime
source_files:
- tests/*
requires:
- pytest
- pytest-sugar
- scikit-learn
- pytorch
- cpuonly
5 changes: 0 additions & 5 deletions devtools/conda-recipe/run_test.py

This file was deleted.

2 changes: 1 addition & 1 deletion devtools/conda-setup+build.yml
@@ -10,6 +10,6 @@ steps:
displayName: 'Update and install dependencies'
continueOnError: false
- bash: |
conda build devtools --python 3.9 --numpy 1.21 -c pytorch
conda build devtools --python 3.10 --numpy 1.21
displayName: 'Build and test'
continueOnError: false
10 changes: 9 additions & 1 deletion noxfile.py
@@ -24,7 +24,15 @@ def tests(session: nox.Session) -> None:
session.log("Running without coverage")
cov_args = []

session.run("pytest", '-vv', '--doctest-modules', '--durations=20', *cov_args, '--pyargs', "tests/", 'deeptime')
test_dirs = ["tests/"]
try:
import torch
# only run doctests if torch is available
test_dirs.append('deeptime')
except ImportError:
pass

session.run("pytest", '-vv', '--doctest-modules', '--durations=20', *cov_args, '--pyargs', *test_dirs)


@nox.session(reuse_venv=True)
2 changes: 1 addition & 1 deletion tests/requirements.txt
@@ -6,7 +6,7 @@ scipy==1.7.2
pybind11==2.8.1
scikit-learn==1.0.1
threadpoolctl==3.0.0
torch==1.10.0
torch>=1.10.0; python_version<"3.10"

pytest==6.2.5
pytest-cov