diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 25f48a6..ee7d4de 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -79,10 +79,11 @@ jobs:
         with:
           name: python-package-distributions
           path: dist/
-
-      - name: Upload to PyPI
-        env:
-          PYPI_PASSWORD: ${{ secrets.pypi_password }}
-        run: |
-          pip install twine;
-          python -m twine upload dist/* -u tommyod -p "$PYPI_PASSWORD" --skip-existing;
+
+      # https://github.com/pypa/gh-action-pypi-publish
+      - name: Publish Python distribution to PyPI
+        uses: pypa/gh-action-pypi-publish@e53eb8b103ffcb59469888563dc324e3c8ba6f06
+        with:
+          skip-existing: true
+          user: __token__
+          password: ${{ secrets.PYPI_API_TOKEN }}
diff --git a/KDEpy/BaseKDE.py b/KDEpy/BaseKDE.py
index 0ed0e78..6c46ac4 100644
--- a/KDEpy/BaseKDE.py
+++ b/KDEpy/BaseKDE.py
@@ -190,12 +190,11 @@ def _process_sequence(sequence_array_like):
         Examples
         --------
         >>> res = BaseKDE._process_sequence([1, 2, 3])
-        >>> (res == np.array([[1], [2], [3]])).all()
-        True
+        >>> assert (res == np.array([[1], [2], [3]])).all()
         """
         # Must convert to float to avoid possible interger overflow
         if isinstance(sequence_array_like, Sequence):
-            out = np.asfarray(sequence_array_like).reshape(-1, 1)
+            out = np.asarray(sequence_array_like, dtype=float).reshape(-1, 1)
         elif isinstance(sequence_array_like, np.ndarray):
             if len(sequence_array_like.shape) == 1:
                 out = sequence_array_like.reshape(-1, 1)
diff --git a/KDEpy/NaiveKDE.py b/KDEpy/NaiveKDE.py
index 487060b..eb51d9d 100644
--- a/KDEpy/NaiveKDE.py
+++ b/KDEpy/NaiveKDE.py
@@ -123,7 +123,7 @@ def evaluate(self, grid_points=None):
         # For every data point, compute the kernel and add to the grid
         bw = self.bw
         if isinstance(bw, numbers.Number):
-            bw = np.asfarray(np.ones(self.data.shape[0]) * bw)
+            bw = np.asarray(np.ones(self.data.shape[0]) * bw, dtype=float)
 
         # TODO: Implementation w.r.t grid points for faster evaluation
         # See the SciPy evaluation for how this can be done
diff --git a/KDEpy/TreeKDE.py b/KDEpy/TreeKDE.py
index 167997d..197c231 100644
--- a/KDEpy/TreeKDE.py
+++ b/KDEpy/TreeKDE.py
@@ -136,7 +136,7 @@ def evaluate(self, grid_points=None, eps=10e-4):
         obs, dims = self.data.shape
         bw = self.bw
         if isinstance(bw, numbers.Number):
-            bw = np.asfarray(np.ones(obs) * bw)
+            bw = np.asarray(np.ones(obs) * bw, dtype=float)
         else:
             bw = np.asarray_chkfinite(bw, dtype=float)
 
diff --git a/KDEpy/binning.py b/KDEpy/binning.py
index 7bc1604..1ab05b1 100644
--- a/KDEpy/binning.py
+++ b/KDEpy/binning.py
@@ -104,16 +104,16 @@ def linbin_cython(data, grid_points, weights=None):
 
     dx = (max_grid - min_grid) / num_intervals
     transformed_data = (data - min_grid) / dx
-    result = np.asfarray(np.zeros(num_intervals + 2))
+    result = np.asarray(np.zeros(num_intervals + 2), dtype=float)
 
     # Two Cython functions are implemented, one for weighted data and one
     # for unweighted data, since creating equal weights is costly w.r.t time
     if weights is None:
         result = _cutils.iterate_data_1D(transformed_data, result)
-        return np.asfarray(result[:-1]) / transformed_data.shape[0]
+        return np.asarray(result[:-1], dtype=float) / transformed_data.shape[0]
     else:
         res = _cutils.iterate_data_1D_weighted(transformed_data, weights, result)
-        return np.asfarray(res[:-1])  # Remove last, outside of grid
+        return np.asarray(res[:-1], dtype=float)  # Remove last, outside of grid
 
 
 def linbin_numpy(data, grid_points, weights=None):
@@ -197,7 +197,7 @@ def linbin_numpy(data, grid_points, weights=None):
     unique_integrals = np.unique(integral)
     unique_integrals = unique_integrals[(unique_integrals >= 0) & (unique_integrals <= len(grid_points))]
 
-    result = np.asfarray(np.zeros(len(grid_points) + 1))
+    result = np.asarray(np.zeros(len(grid_points) + 1), dtype=float)
     for grid_point in unique_integrals:
         # Use binary search to find indices for the grid point
         # Then sum the data assigned to that grid point
@@ -337,7 +337,7 @@ def linbin_Ndim(data, grid_points, weights=None):
 
     # Compute the number of grid points for each dimension in the grid
     grid_num = (grid_points[:, i] for i in range(dims))
-    grid_num = np.array(list(len(np.unique(g)) for g in grid_num))
+    grid_num = np.array(list(len(np.unique(g)) for g in grid_num), dtype="long")
 
     # Scale the data to the grid
     min_grid = np.min(grid_points, axis=0)
@@ -356,7 +356,7 @@ def linbin_Ndim(data, grid_points, weights=None):
     # Weighted data has two specific routines
     if weights is not None:
         if data_dims >= 3:
-            binary_flgs = cartesian(([0, 1],) * dims)
+            binary_flgs = cartesian(([0, 1],) * dims).astype("long")
             result = _cutils.iterate_data_ND_weighted(data, weights, result, grid_num, obs_tot, binary_flgs)
         else:
             result = _cutils.iterate_data_2D_weighted(data, weights, result, grid_num, obs_tot)
@@ -367,7 +367,7 @@ def linbin_Ndim(data, grid_points, weights=None):
     # specialize routine for this case.
     else:
         if data_dims >= 3:
-            binary_flgs = cartesian(([0, 1],) * dims)
+            binary_flgs = cartesian(([0, 1],) * dims).astype("long")
             result = _cutils.iterate_data_ND(data, result, grid_num, obs_tot, binary_flgs)
         else:
             result = _cutils.iterate_data_2D(data, result, grid_num, obs_tot)
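The recurring change in the files above replaces `np.asfarray(x)` with `np.asarray(x, dtype=float)`, since `np.asfarray` was removed in NumPy 2.0; the explicit `"long"` dtype casts in `binning.py` presumably keep the integer arrays matching the types expected by the `_cutils` Cython routines. A minimal sketch of the float-conversion equivalence, with illustrative data not taken from the diff:

```python
import numpy as np

# np.asfarray was removed in NumPy 2.0; np.asarray(..., dtype=float) is the
# drop-in replacement used throughout this diff.
data = [1, 2, 3]
out = np.asarray(data, dtype=float).reshape(-1, 1)
assert out.dtype == np.float64 and out.shape == (3, 1)

# On NumPy < 2.0, where np.asfarray still exists, the two spellings agree.
if hasattr(np, "asfarray"):
    assert np.array_equal(out, np.asfarray(data).reshape(-1, 1))
```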
diff --git a/KDEpy/bw_selection.py b/KDEpy/bw_selection.py
index a54950a..77a8d81 100644
--- a/KDEpy/bw_selection.py
+++ b/KDEpy/bw_selection.py
@@ -55,8 +55,8 @@ def _fixed_point(t, N, I_sq, a2):
     """
 
     # This is important, as the powers might overflow if not done
-    I_sq = np.asfarray(I_sq, dtype=FLOAT)
-    a2 = np.asfarray(a2, dtype=FLOAT)
+    I_sq = np.asarray(I_sq, dtype=FLOAT)
+    a2 = np.asarray(a2, dtype=FLOAT)
 
     # ell = 7 corresponds to the 5 steps recommended in the paper
     ell = 7
@@ -72,7 +72,8 @@ def _fixed_point(t, N, I_sq, a2):
         # but this is faster so and requires an import less
 
         # Step one: estimate t_s from |f^(s+1)|^2
-        odd_numbers_prod = np.product(np.arange(1, 2 * s + 1, 2, dtype=FLOAT))
+        # odd_numbers_prod = np.product(np.arange(1, 2 * s + 1, 2, dtype=FLOAT))
+        odd_numbers_prod = np.prod(np.arange(1, 2 * s + 1, 2, dtype=FLOAT))
         K0 = odd_numbers_prod / np.sqrt(2 * np.pi)
         const = (1 + (1 / 2) ** (s + 1 / 2)) / 3
         time = np.power((2 * const * K0 / (N * f)), (2.0 / (3.0 + 2.0 * s)))
diff --git a/KDEpy/kernel_funcs.py b/KDEpy/kernel_funcs.py
index f6e719c..f966198 100644
--- a/KDEpy/kernel_funcs.py
+++ b/KDEpy/kernel_funcs.py
@@ -252,8 +252,7 @@ def __init__(self, function, var=1, support=3):
         ...     return np.exp(-x) / normalization
         >>> kernel = Kernel(exp, var=4, support=np.inf)
         >>> # The function is scaled so that the standard deviation (bw) = 1
-        >>> kernel(0, bw=1, norm=2)[0] > kernel(1, bw=1, norm=2)[0]
-        True
+        >>> assert kernel(0, bw=1, norm=2)[0] > kernel(1, bw=1, norm=2)[0]
         >>> np.allclose(kernel(np.array([0, 1, 2])), kernel([0, 1, 2]))
         True
         >>> np.allclose(kernel(0), kernel([0]))
@@ -292,7 +291,7 @@ def practical_support(self, bw, atol=10e-5):
         else:
 
             def f(x):
-                return self.evaluate(x, bw=bw) - atol
+                return self.evaluate(x, bw=bw)[0] - atol
 
             try:
                 xtol = 1e-3
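Similarly, `np.product` was a long-deprecated alias of `np.prod` and is gone in NumPy 2.0, hence the swap in `_fixed_point`. A small sketch of the odd-number product that line computes, where `s = 4` is an illustrative stand-in for the loop variable used in `_fixed_point`:

```python
import numpy as np

s = 4  # illustrative value only; s is the loop variable in _fixed_point

# Product of the odd numbers 1 * 3 * 5 * ... * (2s - 1), previously computed
# with the removed alias np.product, now with np.prod.
odd_numbers_prod = np.prod(np.arange(1, 2 * s + 1, 2, dtype=float))
assert odd_numbers_prod == 1 * 3 * 5 * 7  # 105.0
```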
diff --git a/KDEpy/tests/test_api.py b/KDEpy/tests/test_api.py
index 6cb82b0..ac3fa02 100644
--- a/KDEpy/tests/test_api.py
+++ b/KDEpy/tests/test_api.py
@@ -51,7 +51,7 @@ def test_api_models_kernels_bandwidths(kde1, kde2, bw, kernel):
     assert err < 0.002
 
 
-type_functions = [tuple, np.array, np.asfarray, lambda x: np.asfarray(x).reshape(-1, 1)]
+type_functions = [tuple, np.array, np.asarray, lambda x: np.asarray(x, dtype=float).reshape(-1, 1)]
 
 
 @pytest.mark.parametrize(
diff --git a/KDEpy/tests/test_kernel_funcs.py b/KDEpy/tests/test_kernel_funcs.py
index 49086ff..ad450ed 100644
--- a/KDEpy/tests/test_kernel_funcs.py
+++ b/KDEpy/tests/test_kernel_funcs.py
@@ -56,7 +56,11 @@ def test_integral_unity(self, fname, function):
             a, b = -function.support, function.support
         else:
             a, b = -5 * function.var, 5 * function.var
-        integral, abserr = quad(function, a=a, b=b)
+
+        def function_float(x):
+            return function(x)[0]
+
+        integral, _ = quad(function_float, a=a, b=b)
         assert np.isclose(integral, 1)
 
     @pytest.mark.parametrize(
@@ -74,10 +78,11 @@ def test_integral_unity_2D_many_p_norms(self, p, kernel_name):
         a, b = -function.support, function.support
 
         # Perform integration 2D
-        def int2D(x1, x2):
-            return function([[x1, x2]], norm=p)
+        def function_float(x1, x2):
+            return function([[x1, x2]], norm=p)[0]
 
-        ans, err = scipy.integrate.nquad(int2D, [[a, b], [a, b]], opts={"epsabs": 10e-2, "epsrel": 10e-2})
+        opts = {"epsabs": 10e-2, "epsrel": 10e-2}
+        ans, _ = scipy.integrate.nquad(function_float, [[a, b], [a, b]], opts=opts)
 
         assert np.allclose(ans, 1, rtol=10e-4, atol=10e-4)
 
@@ -93,10 +98,11 @@ def test_integral_unity_3D_many_p_norms(self, p):
         a, b = -function.support, function.support
 
         # Perform integration 2D
-        def int2D(x1, x2, x3):
-            return function([[x1, x2, x3]], norm=p)
+        def function_float(x1, x2, x3):
+            return function([[x1, x2, x3]], norm=p)[0]
 
-        ans, err = scipy.integrate.nquad(int2D, [[a, b], [a, b], [a, b]], opts={"epsabs": 10e-1, "epsrel": 10e-1})
+        opts = {"epsabs": 10e-1, "epsrel": 10e-1}
+        ans, _ = scipy.integrate.nquad(function_float, [[a, b], [a, b], [a, b]], opts=opts)
 
         assert np.allclose(ans, 1, rtol=10e-2, atol=10e-2)
 
@@ -121,10 +127,11 @@ def test_integral_unity_2D_p_norm(self, fname, function, p):
         a, b = -6, 6
 
         # Perform integration 2D
-        def int2D(x1, x2):
-            return function([[x1, x2]], norm=p)
+        def function_float(x1, x2):
+            return function([[x1, x2]], norm=p)[0]
 
-        ans, err = scipy.integrate.nquad(int2D, [[a, b], [a, b]], opts={"epsabs": 10e-1, "epsrel": 10e-1})
+        opts = {"epsabs": 10e-1, "epsrel": 10e-1}
+        ans, _ = scipy.integrate.nquad(function_float, [[a, b], [a, b]], opts=opts)
 
         assert np.allclose(ans, 1, rtol=10e-3, atol=10e-3)
 
@@ -150,10 +157,11 @@ def test_integral_unity_3D_p_norm(self, fname, function, p):
         a, b = -4, 4
 
         # Perform integration 2D
-        def int2D(x1, x2, x3):
-            return function([[x1, x2, x3]], norm=p)
+        def function_float(x1, x2, x3):
+            return function([[x1, x2, x3]], norm=p)[0]
 
-        ans, err = scipy.integrate.nquad(int2D, [[a, b], [a, b], [a, b]], opts={"epsabs": 10e-1, "epsrel": 10e-1})
+        opts = {"epsabs": 10e-1, "epsrel": 10e-1}
+        ans, _ = scipy.integrate.nquad(function_float, [[a, b], [a, b], [a, b]], opts=opts)
 
         assert np.allclose(ans, 1, rtol=10e-2, atol=10e-2)
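The test changes above (and the `[0]` added in `practical_support`) make the integrands passed to `quad`/`nquad` return plain floats rather than length-1 arrays, since implicitly converting a size-1 array to a Python scalar is deprecated in recent NumPy. A sketch of the wrapper pattern, using a stand-in Gaussian instead of a KDEpy kernel object:

```python
import numpy as np
from scipy.integrate import quad


def kernel(x):
    # Stand-in for a KDEpy kernel: returns an array even for scalar input.
    x = np.atleast_1d(x)
    return np.exp(-0.5 * x**2) / np.sqrt(2 * np.pi)


def kernel_float(x):
    # Scalar-valued wrapper, mirroring function_float in the updated tests.
    return kernel(x)[0]


integral, _ = quad(kernel_float, a=-5, b=5)
assert np.isclose(integral, 1, atol=1e-3)
```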
diff --git a/pyproject.toml b/pyproject.toml
index bb4dee7..499add5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,8 +1,8 @@
 [project]
 name = "KDEpy"
-version = "1.1.8"
+version = "1.1.10"
 dependencies = [
-    "numpy>=1.14.2,<2.0",
+    "numpy>=1.14.2",
     "scipy>=1.0.1,<2.0",
 ]
 description = "Kernel Density Estimation in Python."
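With the `numpy<2.0` pin dropped, KDEpy 1.1.10 is expected to install and run alongside NumPy 2.x. A quick smoke test sketch, assuming the `FFTKDE` usage shown in the project README (not part of this diff) and a NumPy 2.x environment:

```python
import numpy as np
from KDEpy import FFTKDE

rng = np.random.default_rng(42)
data = rng.normal(size=2**6)

# Fit a Gaussian KDE and evaluate it on an automatically chosen grid.
x, y = FFTKDE(kernel="gaussian", bw="silverman").fit(data).evaluate()

# The estimate should be finite and integrate to roughly one.
# np.trapezoid is the NumPy 2.x name for the trapezoidal rule.
assert np.isfinite(y).all()
assert np.isclose(np.trapezoid(y, x), 1, atol=1e-2)
```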