diff --git a/_modules/index.html b/_modules/index.html
index 0a32b3c..76f314d 100644
--- a/_modules/index.html
+++ b/_modules/index.html
@@ -1,42 +1,33 @@
# -*- coding: utf-8 -*-
-from __future__ import (absolute_import, division,
+from __future__ import (absolute_import, division,
print_function, unicode_literals)
-from builtins import *
+from builtins import (
+ bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next,
+ oct, open, pow, round, super, filter, map, zip
+)
import numpy as np
import pkg_resources
+import datetime
+
+
+def lorenz_euler(length, sigma, rho, beta, dt=0.01, start=[1,1,1]):
+ """
+ Simulates the Lorenz system using a simple Euler method
+
+ The Lorenz system is a three dimensional dynamical system given
+ by the following equations:
+
+ dx/dt = sigma * (y - x)
+ dy/dt = rho * x - y - x * z
+ dz/dt = x * y - beta * z
+ """
+ def lorenz(state, sigma, rho, beta):
+ x, y, z = state
+ return np.array([
+ sigma * (y - x),
+ rho * x - y - x * z,
+ x * y - beta * z
+ ], dtype="float32")
+ trajectory = np.zeros((length, 3), dtype="float32")
+ trajectory[0] = start
+ for i in range(1, length):
+ t = i * dt
+ trajectory[i] = trajectory[i-1] + lorenz(trajectory[i-1], sigma, rho, beta) * dt
+ return trajectory
+
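As a brief usage sketch (an editorial illustration, not part of the diff): the call below mirrors the one documented in load_lorenz_physionet further down, simulating the classic attractor and discarding the transient.

    # sketch: classic Lorenz parameters sigma=10, rho=28, beta=8/3
    from nolds import datasets
    data = datasets.lorenz_euler(
        3000, 10, 28, 8/3.0, start=[0.1, 0.1, 0.1], dt=0.012
    )[1000:, 0]  # keep only the x variable after the transient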
+def lorenz_lyap(sigma, rho, beta):
+ """
+ Calculates the exact Lyapunov dimension of the Lorenz system according to
+ Leonov 2015 [ll_1]_.
+
+ References:
+ .. [ll_1] G. A. Leonov and N. V. Kuznetsov, “On differences and similarities in the
+ analysis of Lorenz, Chen, and Lu systems,” Applied Mathematics and Computation,
+ vol. 256, pp. 334–343, Apr. 2015, doi: 10.1016/j.amc.2014.12.132.
+ """
+ return 3 - 2 * (sigma + beta + 1) / (sigma + 1 + np.sqrt((sigma-1) ** 2 + 4 * sigma * rho))
+
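A quick numeric check of this formula (editorial sketch; the classic parameter values are taken from the examples in this package):

    # sketch: Lyapunov dimension for sigma=10, rho=28, beta=8/3
    # 3 - 2 * (10 + 8/3 + 1) / (11 + sqrt((10 - 1)**2 + 4 * 10 * 28))
    # = 3 - 27.33 / (11 + sqrt(1201)), which is roughly 2.4013
    from nolds import datasets
    print(datasets.lorenz_lyap(10, 28, 8/3.0))  # ~2.4013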
-[docs]def fbm(n, H=0.75):
- """
+
+[docs]
+def fbm(n, H=0.75):
+ """
  Generates fractional Brownian motions of the desired length.
Author:
@@ -83,8 +119,11 @@ Source code for nolds.datasets
return np.dot(sigma, v)
-[docs]def fgn(n, H=0.75):
- """
+
+
+[docs]
+def fgn(n, H=0.75):
+ """
  Generates fractional Gaussian noise of the desired length.
References:
@@ -102,11 +141,14 @@ Source code for nolds.datasets
array of float:
simulated fractional gaussian noise
"""
- return np.diff(fbm(n+1,H=H))
+ return np.diff(fbm(n+1, H=H))
+
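A short usage sketch (editorial): since fgn is the difference series of fbm, its cumulative sum recovers a fractional Brownian motion up to the starting value.

    # sketch: fractional Gaussian noise and the corresponding motion
    import numpy as np
    from nolds import datasets
    noise = datasets.fgn(1000, H=0.75)  # increments
    motion = np.cumsum(noise)           # fBm again, up to the starting value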
-[docs]def qrandom(n):
- """
+
+[docs]
+def qrandom(n):
+ """
Creates an array of n true random numbers obtained from the quantum random
number generator at qrng.anu.edu.au
@@ -126,8 +168,12 @@ Source code for nolds.datasets
for i in range(int(np.ceil(n/1024.0)))
])[:n]
-[docs]def load_qrandom():
- """
+
+
+
+[docs]
+def load_qrandom():
+ """
Loads a set of 10000 random numbers generated by qrandom.
This dataset can be used when you want to do some limited tests with "true"
@@ -141,11 +187,13 @@ Source code for nolds.datasets
with pkg_resources.resource_stream(__name__, fname) as f:
return np.load(f)
+
+
def load_brown72():
- """
+ """
Loads the dataset brown72 with a prescribed Hurst exponent of 0.72
- Source: http://www.bearcave.com/misl/misl_tech/wavelets/hurst/
+ Source: http://bearcave.com/misl/misl_tech/wavelets/hurst/index.html
Returns:
float array:
@@ -155,8 +203,41 @@ Source code for nolds.datasets
with pkg_resources.resource_stream(__name__, fname) as f:
return np.load(f)
-[docs]def tent_map(x, steps, mu=2):
- """
+
+def load_lorenz_physionet():
+ """
+ Loads a dataset containing the X variable of the Lorenz system
+ as well as the output of PhysioNet's dfa implementation on that dataset.
+
+ The input data was created with the following code:
+
+ data = datasets.lorenz_euler(
+ 3000, 10, 28, 8/3.0, start=[0.1,0.1,0.1], dt=0.012
+ )[1000:,0]
+
+  The output from PhysioNet was created by calling:
+
+ dfa < lorenz.txt > lorenz_physionet.txt
+
+ Returns:
+ 1d float array:
+ time series of the X variable of the Lorenz system that was used as input
+ 2d float array:
+ x- and y-coordinates of the line fitting step in the PhysioNet output
+ """
+ fname = "datasets/lorenz.txt"
+ with pkg_resources.resource_stream(__name__, fname) as f:
+ data_in = np.loadtxt(f)
+ fname = "datasets/lorenz_physionet.txt"
+ with pkg_resources.resource_stream(__name__, fname) as f:
+ data_out = np.loadtxt(f)
+ return data_in, data_out
+
+
+
+[docs]
+def tent_map(x, steps, mu=2):
+ """
Generates a time series of the tent map.
Characteristics and Background:
@@ -205,16 +286,20 @@ Source code for nolds.datasets
x = mu * x if x < 0.5 else mu * (1 - x)
yield x
+
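A usage sketch (editorial), following the np.fromiter pattern used in nolds.examples.plot_lyap to materialize the generator:

    # sketch: 1000 iterations of the tent map with mu = 2
    import numpy as np
    from nolds import datasets
    series = np.fromiter(datasets.tent_map(0.1, 1000, mu=2), dtype="float32")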
# TODO should all math be formatted like this, or should the documentation of
# logistic_map revert to a version that is more readable as plain text
-[docs]def logistic_map(x, steps, r=4):
- r"""
+
+
+[docs]
+def logistic_map(x, steps, r=4):
+ r"""
Generates a time series of the logistic map.
Characteristics and Background:
The logistic map is among the simplest examples for a time series that can
- exhibit chaotic behavior depending on the parameter r. For r between 2 and
+ exhibit chaotic behavior depending on the parameter r. For r between 2 and
3, the series quickly becomes static. At r=3 the first bifurcation point is
reached after which the series starts to oscillate. Beginning with r = 3.6
it shows chaotic behavior with a few islands of stability until perfect
@@ -225,7 +310,7 @@ Source code for nolds.datasets
have to make a few observations for maps in general that are repeated
applications of a function to a starting value.
- If we have two starting values that differ by some infinitesimal
+ If we have two starting values that differ by some infinitesimal
  :math:`\delta_0` then according to the definition of the lyapunov exponent
we will have an exponential divergence:
@@ -299,15 +384,175 @@ Source code for nolds.datasets
x = r * x * (1 - x)
yield x
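An editorial sketch based on the derivation used in nolds.examples.plot_lyap below: the analytic Lyapunov exponent of the map is the mean log absolute derivative along the trajectory, which for r = 4 is known to be log(2).

    # sketch: logistic map series and its analytic Lyapunov exponent
    # f(x) = r * x * (1 - x)  =>  f'(x) = r - 2 * r * x
    import numpy as np
    from nolds import datasets
    r = 4
    x = np.fromiter(datasets.logistic_map(0.1, 1000, r=r), dtype="float32")
    lam = np.mean(np.log(abs(r - 2 * r * x[np.where(x != 0.5)])))  # ~log(2)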
-brown72 = load_brown72()
+
+
+[docs]
+def load_financial():
+ """
+ Loads the following datasets from CSV files in this package:
+
+ - jkse: Jakarta Composite Index, downloaded on 2019-02-12 from https://finance.yahoo.com/quote/%5EJKSE/history?period1=631148400&period2=988668000&interval=1d&filter=history&frequency=1d
+ - n225: Nikkei 225, downloaded on 2019-02-12 from https://finance.yahoo.com/quote/%5EN225/history?period1=631148400&period2=988668000&interval=1d&filter=history&frequency=1d
+ - ndx: NASDAQ 100, downloaded on 2019-02-12 from https://finance.yahoo.com/quote/%5ENDX/history?period1=631148400&period2=988668000&interval=1d&filter=history&frequency=1d
+
+  All datasets are daily prices from the period from 1990-01-01 to
+  2001-05-01. Missing values are NaN, except for opening values, which are
+  treated as follows:
+
+ - If the first opening value is missing, the first *existing* opening value
+ is used for the first day.
+ - All other missing opening values are filled by the close value of the last
+ day where data was available.
+
+ Returns:
+ list of tuple(1d-array, 2d-array):
+ datasets with days as array of date objects and 2d-array with the columns
+ "Open", "High", "Low", "Close", "Adj Close", and "Volume". Note that
+ "Open" values have been padded to ensure that there are no NaNs left.
+ """
+
+ def load_finance_yahoo_data(f):
+ f.readline()
+ days = []
+ values = []
+ for l in f:
+ fields = l.decode("utf-8")
+ fields = fields.split(",")
+ d = datetime.datetime.strptime(fields[0], "%Y-%m-%d")
+ v = [np.nan if x.strip() == "null" else float(x) for x in fields[1:]]
+ days.append(d)
+ values.append(v)
+ return np.array(days), np.array(values)
+
+ def pad_opening_values(values):
+ # fill first value from future if required
+ first = 0
+ while np.isnan(values[first, 0]):
+ first += 1
+ values[0, 0] = values[first, 0]
+ # iterate over all indices where data is missing
+ for i in np.where(np.isnan(values[:, 0]))[0]:
+ j = i
+ # pad opening value with close value of previous data
+ while np.isnan(values[j][3]):
+ j -= 1
+ values[i, 0] = values[j, 3]
+
+ data = []
+ for index in ["^JKSE", "^N225", "^NDX"]:
+ fname = "datasets/{}.csv".format(index)
+ with pkg_resources.resource_stream(__name__, fname) as f:
+ days, values = load_finance_yahoo_data(f)
+ pad_opening_values(values)
+ data.append((days, values))
+ return data
+
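An editorial sketch of how the returned structure can be unpacked; it mirrors the module-level assignment jkse, n225, ndx = load_financial() at the end of this file.

    # sketch: unpack the three financial datasets
    from nolds import datasets
    jkse, n225, ndx = datasets.load_financial()
    days, values = jkse     # days: array of date objects
    opening = values[:, 0]  # columns: Open, High, Low, Close, Adj Close, Volume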
+
+
+
+[docs]
+def barabasi1991_fractal(size, iterations, b1=0.8, b2=0.5):
+ """
+ Generates the simple fractal described in [bf]_.
+
+ The fractal divides a rectangular segment starting at (x0, y0) with width w
+ and height h along the x axis into four line segments of equal size with the
+ boundary points [x0, x1, x2, x3, x4]. It has two parameters b1 and b2 that
+  allow choosing the values for y(x1) and y(x3), while it always holds that
+ y(x0) = y0, y(x2) = y0 and y(x4) = y0 + h.
+
+ The process starts with a single line segment of height 1 spanning the whole
+ data range. In each iteration, the rectangles spanning the line segments
+ from the previous iteration are subdivided according to the same rule.
+
+ References:
+ .. [bf] A.-L. Barabási and T. Vicsek, “Multifractality of self-affine
+ fractals,” Physical Review A, vol. 44, no. 4, pp. 2730–2733, 1991.
+
+ Args:
+ size (int):
+ number of data points in the resulting array
+ iterations (int):
+ number of iterations to perform
+
+ Kwargs:
+ b1 (float):
+ relative height at x1 (between 0 and 1)
+ b2 (float):
+ relative height at x3 (between 0 and 1)
+
+ Returns:
+ (1d-array of float):
+ generated fractal
+ """
+ def b1991(x0, y0, w, h):
+ if h < 0:
+      # for a segment with negative slope we have to flip the x-axis
+ d, nxtp = b1991(x0, y0 + h, w, -h)
+ return d[::-1], nxtp
+ x1 = x0 + w // 4
+ x2 = x0 + w // 2
+ x3 = x2 + w // 4
+ x4 = x0 + w
+ data = np.zeros(w, dtype=np.float64)
+ data[x0 - x0:x1 - x0] = np.linspace(0, 1, x1 - x0) * b1 * h + y0
+ data[x1 - x0:x2 - x0] = np.linspace(1, 0, x2 - x1) * b1 * h + y0
+ data[x2 - x0:x3 - x0] = np.linspace(0, 1, x3 - x2) * b2 * h + y0
+ data[x3 - x0:x4 - x0] = np.linspace(0, 1, x4 - x3) * (1 - b2) * h \
+ + y0 + b2 * h
+ return data, [x0, x1, x2, x3, x4]
+ fractal = np.linspace(0, 1, size)
+ intervals = [(0, size)]
+ for _ in range(iterations):
+ next_intervals = []
+ for x1, x2 in intervals:
+ d, nxtp = b1991(x1, fractal[x1], x2 - x1, fractal[x2-1] - fractal[x1])
+ fractal[x1:x2] = d
+ next_intervals.extend(
+ [(np1, np2) for np1, np2 in zip(nxtp[:-1], nxtp[1:])]
+ )
+ intervals = next_intervals
+ return fractal
+
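A usage sketch (editorial; much smaller than the 10^7-point call in nolds.examples.barabasi_1991_figure2, and the power-of-four size is an assumption to keep the subdivisions exact):

    # sketch: Barabasi-Vicsek fractal with the default parameters
    from nolds import datasets
    fractal = datasets.barabasi1991_fractal(4 ** 7, 5, b1=0.8, b2=0.5)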
+
+
+brown72 = load_brown72()
+jkse, n225, ndx = load_financial()
+
- ©2016-2018, Christopher Schölzel.
+ ©2016-2024, Christopher Schölzel.
|
- Powered by Sphinx 1.6.6
- & Alabaster 0.7.10
+ Powered by Sphinx 8.0.2
+ & Alabaster 1.0.0
diff --git a/_modules/nolds/examples.html b/_modules/nolds/examples.html
index ce62421..8770eed 100644
--- a/_modules/nolds/examples.html
+++ b/_modules/nolds/examples.html
@@ -1,57 +1,52 @@
- nolds.examples — Nolds 0.5.1 documentation
+ nolds.examples — Nolds 0.6.0 documentation
Source code for nolds.examples
# -*- coding: utf-8 -*-
-from __future__ import (absolute_import, division,
+from __future__ import (absolute_import, division,
print_function, unicode_literals)
-from builtins import *
-from . import measures as nolds
-from . import datasets
+from builtins import (
+ bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next,
+ oct, open, pow, round, super, filter, map, zip
+)
+from . import measures as nolds
+from . import datasets
import numpy as np
-# TODO better legends for plots
-[docs]def weron_2002_figure2(n = 10000):
- """
+
+[docs]
+def weron_2002_figure2(n=10000):
+ """
Recreates figure 2 of [w]_ comparing the reported values by Weron to the
values obtained by the functions in this package.
@@ -61,8 +56,8 @@ Source code for nolds.examples
over all iterations is plotted for the following configurations:
* ``weron`` is the Anis-Lloyd-corrected Hurst exponent calculated by Weron
- * ``rs50`` is the Anis-Lloyd-corrected Hurst exponent calculated by Nolds with
- the same parameters as used by Weron
+ * ``rs50`` is the Anis-Lloyd-corrected Hurst exponent calculated by Nolds
+ with the same parameters as used by Weron
* ``weron_raw`` is the uncorrected Hurst exponent calculated by Weron
* ``rs50_raw`` is the uncorrected Hurst exponent calculated by Nolds with the
same parameters as used by Weron
@@ -89,18 +84,19 @@ Source code for nolds.examples
import matplotlib.pyplot as plt
# note: these values are calculated by measurements in inkscape of the plot
# from the paper
- reported = [6.708, 13.103, 20.240, 21.924, 22.256, 24.112, 24.054, 26.299,
+ reported = [6.708, 13.103, 20.240, 21.924, 22.256, 24.112, 24.054, 26.299,
26.897]
reported_raw = [160.599, 141.663, 128.454, 115.617, 103.651, 95.481, 86.810,
81.799, 76.270]
+
def height_to_h(height):
return 0.49 + height / 29.894 * 0.01
reported = height_to_h(np.array(reported))
reported_raw = height_to_h(np.array(reported_raw))
data = []
- for e in range(8,17):
+ for e in range(8, 17):
l = 2**e
- nvals = 2**np.arange(6,e)
+ nvals = 2**np.arange(6, e)
rsn = np.mean([
nolds.hurst_rs(np.random.normal(size=l), fit="poly")
for _ in range(n)
@@ -110,21 +106,27 @@ Source code for nolds.examples
for _ in range(n)
])
rs50_raw = np.mean([
- nolds.hurst_rs(np.random.normal(size=l), fit="poly", nvals=nvals, corrected=False)
+ nolds.hurst_rs(
+ np.random.normal(size=l), fit="poly", nvals=nvals, corrected=False
+ )
for _ in range(n)
])
data.append((rsn, rs50, rs50_raw))
- lines = plt.plot(np.arange(8,17), data)
- r = plt.plot(np.arange(8,17), reported)
- rr = plt.plot(np.arange(8,17), reported_raw)
+ lines = plt.plot(np.arange(8, 17), data)
+ r = plt.plot(np.arange(8, 17), reported)
+ rr = plt.plot(np.arange(8, 17), reported_raw)
plt.legend(r + rr + lines, ("weron", "weron_raw", "rsn", "rs50", "rs50_raw"))
- plt.xticks(np.arange(8,17),2**np.arange(8,17))
+ plt.xticks(np.arange(8, 17), 2**np.arange(8, 17))
plt.xlabel("sequence length")
plt.ylabel("estimated hurst exponent")
plt.show()
-[docs]def plot_hurst_hist():
- """
+
+
+
+[docs]
+def plot_hurst_hist():
+ """
Plots a histogram of values obtained for the hurst exponent of uniformly
distributed white noise.
@@ -132,17 +134,25 @@ Source code for nolds.examples
"""
# local import to avoid dependency for non-debug use
import matplotlib.pyplot as plt
- hs = [nolds.hurst_rs(np.random.random(size=10000), corrected=True) for _ in range(100)]
+ hs = [
+ nolds.hurst_rs(np.random.random(size=10000), corrected=True)
+ for _ in range(100)
+ ]
plt.hist(hs, bins=20)
plt.xlabel("esimated value of hurst exponent")
plt.ylabel("number of experiments")
plt.show()
-[docs]def plot_lyap(maptype="logistic"):
- """
+
+
+
+[docs]
+def plot_lyap(maptype="logistic"):
+ """
Plots a bifurcation plot of the given map and superimposes the true
lyapunov exponent as well as the estimates of the largest lyapunov exponent
- obtained by ``lyap_r`` and ``lyap_e``. The idea for this plot is taken from [ll]_.
+ obtained by ``lyap_r`` and ``lyap_e``. The idea for this plot is taken
+ from [ll]_.
This function requires the package ``matplotlib``.
@@ -153,8 +163,8 @@ Source code for nolds.examples
Kwargs:
maptype (str):
- can be either ``"logistic"`` for the logistic map or ``"tent"`` for the tent
- map.
+ can be either ``"logistic"`` for the logistic map or ``"tent"`` for the
+ tent map.
"""
# local import to avoid dependency for non-debug use
import matplotlib.pyplot as plt
@@ -166,26 +176,26 @@ Source code for nolds.examples
param_name = "r"
param_range = np.arange(2, 4, 0.01)
full_data = np.array([
- np.fromiter(datasets.logistic_map(x_start, n, r),dtype="float32")
+ np.fromiter(datasets.logistic_map(x_start, n, r), dtype="float32")
for r in param_range
])
# It can be proven that the lyapunov exponent of the logistic map
# (or any map that is an iterative application of a function) can be
# calculated as the mean of the logarithm of the absolute of the
# derivative at the individual data points.
- # For a proof see for example:
+ # For a proof see for example:
# https://blog.abhranil.net/2015/05/15/lyapunov-exponent-of-the-logistic-map-mathematica-code/
# Derivative of logistic map: f(x) = r * x * (1 - x) = r * x - r * x²
# => f'(x) = r - 2 * r * x
lambdas = [
np.mean(np.log(abs(r - 2 * r * x[np.where(x != 0.5)])))
- for x,r in zip(full_data, param_range)
+ for x, r in zip(full_data, param_range)
]
elif maptype == "tent":
- param_name = "$\mu$"
+ param_name = "$\\mu$"
param_range = np.arange(0, 2, 0.01)
full_data = np.array([
- np.fromiter(datasets.tent_map(x_start, n, mu),dtype="float32")
+ np.fromiter(datasets.tent_map(x_start, n, mu), dtype="float32")
for mu in param_range
])
# for the tent map the lyapunov exponent is much easier to calculate
@@ -197,12 +207,12 @@ Source code for nolds.examples
else:
    raise ValueError("maptype %s not recognized" % maptype)
- kwargs_e = { "emb_dim": 6, "matrix_dim": 2 }
- kwargs_r = { "emb_dim": 6, "lag": 2, "min_tsep": 20, "trajectory_len": 20}
+ kwargs_e = {"emb_dim": 6, "matrix_dim": 2}
+ kwargs_r = {"emb_dim": 6, "lag": 2, "min_tsep": 20, "trajectory_len": 20}
lambdas_e = [max(nolds.lyap_e(d, **kwargs_e)) for d in full_data]
lambdas_r = [nolds.lyap_r(d, **kwargs_r) for d in full_data]
bifur_x = np.repeat(param_range, nbifur)
- bifur = np.reshape(full_data[:,-nbifur:], nbifur * param_range.shape[0])
+ bifur = np.reshape(full_data[:, -nbifur:], nbifur * param_range.shape[0])
plt.title("Lyapunov exponent of the %s map" % maptype)
plt.plot(param_range, lambdas, "b-", label="true lyap. exponent")
@@ -218,9 +228,14 @@ Source code for nolds.examples
plt.legend(loc="best")
plt.show()
-[docs]def profiling():
- """
- Runs a profiling test for the function ``lyap_e`` (mainly used for development)
+
+
+
+[docs]
+def profiling():
+ """
+ Runs a profiling test for the function ``lyap_e`` (mainly used for
+ development)
This function requires the package ``cProfile``.
"""
@@ -229,8 +244,12 @@ Source code for nolds.examples
data = np.cumsum(np.random.random(n) - 0.5)
cProfile.runctx('lyap_e(data)', {'lyap_e': nolds.lyap_e}, {'data': data})
-[docs]def hurst_compare_nvals(data, nvals=None):
- """
+
+
+
+[docs]
+def hurst_compare_nvals(data, nvals=None):
+ """
Creates a plot that compares the results of different choices for nvals
for the function hurst_rs.
@@ -245,16 +264,16 @@ Source code for nolds.examples
"""
import matplotlib.pyplot as plt
data = np.asarray(data)
- n_all = np.arange(2,len(data)+1)
+ n_all = np.arange(2, len(data)+1)
dd_all = nolds.hurst_rs(data, nvals=n_all, debug_data=True, fit="poly")
dd_def = nolds.hurst_rs(data, debug_data=True, fit="poly")
n_def = np.round(np.exp(dd_def[1][0])).astype("int32")
n_div = n_all[np.where(len(data) % n_all[:-1] == 0)]
dd_div = nolds.hurst_rs(data, nvals=n_div, debug_data=True, fit="poly")
+
def corr(nvals):
return [np.log(nolds.expected_rs(n)) for n in nvals]
-
l_all = plt.plot(dd_all[1][0], dd_all[1][1] - corr(n_all), "o")
l_def = plt.plot(dd_def[1][0], dd_def[1][1] - corr(n_def), "o")
l_div = plt.plot(dd_div[1][0], dd_div[1][1] - corr(n_div), "o")
@@ -264,11 +283,12 @@ Source code for nolds.examples
if nvals is not None:
dd_cst = nolds.hurst_rs(data, nvals=nvals, debug_data=True, fit="poly")
l_cst = plt.plot(dd_cst[1][0], dd_cst[1][1] - corr(nvals), "o")
- l_cst = l_cst
t_cst = ["custom"]
plt.xlabel("log(n)")
plt.ylabel("log((R/S)_n - E[(R/S)_n])")
- plt.legend(l_all + l_def + l_div + l_cst, ["all", "default", "divisors"] + t_cst)
+ plt.legend(
+ l_all + l_def + l_div + l_cst, ["all", "default", "divisors"] + t_cst
+ )
labeled_data = zip([dd_all[0], dd_def[0], dd_div[0]], ["all", "def", "div"])
for data, label in labeled_data:
print("%s: %.3f" % (label, data))
@@ -276,10 +296,333 @@ Source code for nolds.examples
print("custom: %.3f" % dd_cst[0])
plt.show()
+
+def sampen_default_tolerance():
+ data = list(datasets.logistic_map(0.34, 1000, r=3.9))
+ oldtol = 0.2 * np.std(data, ddof=1)
+ old_res = [
+ nolds.sampen(data, emb_dim=i, tolerance=oldtol)
+ for i in range(1, 30)
+ ]
+ new_res = [
+ nolds.sampen(data, emb_dim=i)
+ for i in range(1, 30)
+ ]
+ for i, old, new in zip(range(1, 30), old_res, new_res):
+ print("emb_dim={} old={:.3f} corrected={:.3f}".format(i, old, new))
+ print(" old variance: {:.3f}".format(np.var(old_res)))
+ print("corrected variance: {:.3f}".format(np.var(new_res)))
+
+def aste_line_fitting(N=100):
+ """
+  Shows a plot demonstrating that the line fitting in T. Aste's original
+  MATLAB code provides the same results as `np.polyfit`.
+ """
+ slope = np.random.random() * 10 - 5
+ intercept = np.random.random() * 100 - 50
+ xvals = np.arange(N)
+ yvals = xvals * slope + intercept + np.random.randn(N)*100
+ import matplotlib.pyplot as plt
+ plt.plot(xvals, yvals, "rx", label="data")
+ plt.plot(
+ [0, N-1], [intercept, intercept + slope * (N-1)],
+ "r-", label="true ({:.3f} x + {:.3f})".format(slope, intercept), alpha=0.5
+ )
+ i_aste, s_aste = nolds._aste_line_fit(xvals, yvals)
+ s_np, i_np = np.polyfit(xvals, yvals, 1)
+ plt.plot(
+ [0, N-1], [i_aste, i_aste + s_aste * (N-1)],
+ "b-", label="aste ({:.3f} x + {:.3f})".format(s_aste, i_aste), alpha=0.5
+ )
+ plt.plot(
+ [0, N-1], [i_np, i_np + s_np * (N-1)],
+ "g-", label="numpy ({:.3f} x + {:.3f})".format(s_np, i_np), alpha=0.5
+ )
+ plt.legend()
+ plt.show()
+
+
+def hurst_mf_stock(debug=False):
+ """
+ Recreates results from [mfs_1]_ (table at start of section 4) as print
+ output.
+
+ Unfortunately as a layman in finance, I could not determine the exact data
+ that Di Matteo et al. used. Instead I use the data from
+ `nolds.datasets.load_financial()`.
+
+ Plots H(2) for the following datasets and algorithms.
+
+ Datasets (opening values from `load_financial()`):
+
+ - jkse: Jakarta Composite Index
+ - n225: Nikkei 225
+ - ndx: NASDAQ 100
+
+ Algorithms:
+
+ - mfhurst_b: GHE according to Barabási et al.
+ - mfhurst_b + dt: like mfhurst_b, but with linear detrending performed first
+ - mfhurst_dm: GHE according to Di Matteo et al. (should be identical to
+ _genhurst)
+ - _genhurst: GHE according to translated MATLAB code by T. Aste (one of the
+ co-authors of Di Matteo).
+
+ References:
+
+ .. [mfs_1] T. Di Matteo, T. Aste, and M. M. Dacorogna, “Scaling behaviors
+ in differently developed markets,” Physica A: Statistical Mechanics
+ and its Applications, vol. 324, no. 1–2, pp. 183–188, 2003.
+
+ Kwargs:
+ debug (boolean):
+ if `True`, a debug plot will be shown for each calculated GHE value
+ except for the ones generated by `_genhurst`.
+ """
+ print("Dataset mfhurst_b mfhurst_b + dt mfhurst_dm _genhurst")
+ financial = [
+ (datasets.jkse, "jkse"), (datasets.n225, "n225"), (datasets.ndx, "ndx")
+ ]
+ for data, lab in financial:
+ data = data[1][:, 0]
+ data = np.log(data)
+ dists = range(1, 20)
+ mfh_b = nolds.mfhurst_b(data, qvals=[2], dists=dists, debug_plot=debug)[0]
+ mfh_b_dt = nolds.mfhurst_b(
+ nolds.detrend_data(data, order=1),
+ qvals=[2], dists=dists, debug_plot=debug
+ )[0]
+ mfh_dm = nolds.mfhurst_dm(data, qvals=[2], debug_plot=debug)[0][0]
+ gh = nolds._genhurst(data, 2)
+ print("{:10s} {:5.3f} {:5.3f} {:5.3f} {:5.3f}".format(lab, mfh_b, mfh_b_dt, mfh_dm, gh))
+
+
+def barabasi_1991_figure2():
+ """
+ Recreates figure 2 from [bf2]_.
+
+ This figure compares calculated and estimated values for H(q) for
+ a fractal generated by 9 iterations of the `barabasi1991_fractal` function
+ with b1 = 0.8 and b2 = 0.5.
+
+ References:
+ .. [bf2] A.-L. Barabási and T. Vicsek, “Multifractality of self-affine
+ fractals,” Physical Review A, vol. 44, no. 4, pp. 2730–2733, 1991.
+ """
+ import matplotlib.pyplot as plt
+ b1991 = datasets.barabasi1991_fractal(10000000, 9)
+ qvals = range(1, 11)
+ qvals_t = range(-10, 11)
+ b1 = 0.8
+ b2 = 0.5
+ dists = [4 ** i for i in range(6, 11)]
+ # dists = nolds.logarithmic_n(100, 0.01 * len(b1991), 2)
+ Hq = nolds.mfhurst_b(b1991, qvals=qvals, dists=dists)
+ Hq_t = [np.log((b1 ** q + b2 ** q) / 2) / np.log(0.25) / q for q in qvals_t]
+ plt.plot(qvals, Hq, "r+", label="mfhurst_b")
+ plt.plot(qvals_t, Hq_t, label="calculated value")
+ plt.legend(loc="best")
+ plt.xlabel("q")
+ plt.ylabel("H(q)")
+ plt.show()
+
+
+def barabasi_1991_figure3():
+ """
+ Recreates figure 3 from [bf3]_.
+
+ This figure compares calculated and estimated values for H(q) for a simple
+ Brownian motion that moves in unit steps (-1 or +1) in each time step.
+
+ References:
+ .. [bf3] A.-L. Barabási and T. Vicsek, “Multifractality of self-affine
+ fractals,” Physical Review A, vol. 44, no. 4, pp. 2730–2733, 1991.
+ """
+ import matplotlib.pyplot as plt
+ brown = np.cumsum(np.random.randint(0, 2, size=10000000)*2-1)
+ qvals = [-5, -4, -3, -2, -1.1, 0.1, 1, 2, 3, 4, 5]
+ Hq_t = [0.5 if q > -1 else -0.5/q for q in qvals]
+ dists = [2 ** i for i in range(6, 15)]
+ # dists = nolds.logarithmic_n(100, 0.01 * len(brown), 1.5)
+ Hq = nolds.mfhurst_b(brown, qvals=qvals, dists=dists, debug_plot=False)
+ plt.plot(qvals, Hq, "r+", label="mfhurst_b")
+ plt.plot(qvals, Hq_t, label="calculated value")
+ plt.ylim(0, 1)
+ plt.legend(loc="best")
+ plt.xlabel("q")
+ plt.ylabel("H(q)")
+ plt.show()
+
+
+def lorenz():
+ """
+ Calculates different measures for the Lorenz system of ordinary
+ differential equations and compares nolds results with prescribed
+ results from the literature.
+
+ The Lorenz system is a three dimensional dynamical system given
+ by the following equations:
+
+ dx/dt = sigma * (y - x)
+ dy/dt = rho * x - y - x * z
+ dz/dt = x * y - beta * z
+
+ To test the reconstruction of higher-dimensional phenomena from
+ one-dimensional data, the lorenz system is simulated with a
+ simple Euler method and then the x-, y-, and z-values are used
+ as one-dimensional input for the nolds algorithms.
+
+ Parameters for Lorenz system:
+
+ - sigma = 10
+ - rho = 28
+ - beta = 8/3
+ - dt = 0.012
+
+ Algorithms:
+
+ - ``lyap_r`` with min_tsep=1000, emb_dim=5, tau=0.01, and lag=5 (see [l_4]_)
+ - ``lyap_e`` with min_tsep=1000, emb_dim=5, matrix_dim=5, and tau=0.01 (see [l_4]_)
+ - ``corr_dim`` with emb_dim=10, and fit=poly (see [l_1]_)
+ - ``hurst_rs`` with fit=poly (see [l_3]_)
+ - ``dfa`` with default parameters (see [l_5]_)
+ - ``sampen`` with default parameters (see [l_2]_)
+
+ References:
+
+ .. [l_1] P. Grassberger and I. Procaccia, “Measuring the strangeness
+ of strange attractors,” Physica D: Nonlinear Phenomena, vol. 9,
+ no. 1, pp. 189–208, 1983.
+ .. [l_2] F. Kaffashi, R. Foglyano, C. G. Wilson, and K. A. Loparo,
+ “The effect of time delay on Approximate & Sample Entropy
+ calculations,” Physica D: Nonlinear Phenomena, vol. 237, no. 23,
+ pp. 3069–3074, 2008, doi: 10.1016/j.physd.2008.06.005.
+ .. [l_3] V. Suyal, A. Prasad, and H. P. Singh, “Nonlinear Time Series
+ Analysis of Sunspot Data,” Sol Phys, vol. 260, no. 2, pp. 441–449,
+ 2009, doi: 10.1007/s11207-009-9467-x.
+ .. [l_4] G. A. Leonov and N. V. Kuznetsov, “On differences and
+ similarities in the analysis of Lorenz, Chen, and Lu systems,”
+ Applied Mathematics and Computation, vol. 256, pp. 334–343, 2015,
+ doi: 10.1016/j.amc.2014.12.132.
+ .. [l_5] S. Wallot, J. P. Irmer, M. Tschense, N. Kuznetsov, A. Højlund,
+ and M. Dietz, “A Multivariate Method for Dynamic System Analysis:
+ Multivariate Detrended Fluctuation Analysis Using Generalized Variance,”
+ Topics in Cognitive Science, p. tops.12688, Sep. 2023,
+ doi: 10.1111/tops.12688.
+
+
+ """
+ import matplotlib.pyplot as plt
+ sigma = 10
+ rho = 28
+ beta = 8.0/3
+ start = [0, 22, 10]
+ n = 10000
+ skip = 10000
+ dt = 0.012
+ data = datasets.lorenz_euler(n + skip, sigma, rho, beta, start=start, dt=dt)[skip:]
+
+ # fig = plt.figure()
+ # ax = fig.add_subplot(111, projection="3d")
+ # ax.plot(data[:, 0], data[:, 1], data[:, 2])
+ # plt.show()
+ # plt.close(fig)
+
+ lyap_expected = datasets.lorenz_lyap(sigma, rho, beta)
+ # Rationale for argument values:
+ # start with medium settings for min_tsep and lag, span a large area with trajectory_len, set fit_offset to 0
+ # up the embedding dimension until you get a clear line in the debug plot
+ # adjust trajectory_len and fit_offset to split off only the linear part
+ # in general: the longer the linear part of the plot, the better
+ lyap_r_args = dict(min_tsep=10, emb_dim=5, tau=dt, lag=5, trajectory_len=28, fit_offset=8, fit="poly")
+ lyap_rx = nolds.lyap_r(data[:, 0], **lyap_r_args)
+ lyap_ry = nolds.lyap_r(data[:, 1], **lyap_r_args)
+ lyap_rz = nolds.lyap_r(data[:, 2], **lyap_r_args)
+ # Rationale for argument values:
+ # Start with emb_dim=matrix_dim, medium min_tsep and min_nb
+ # After that, no good guidelines for stability. :(
+ # -> Just experiment with settings until you get close to expected value. ¯\_(ツ)_/¯
+ # NOTE: It seems from this example and `lyapunov-logistic` that lyap_e has a scaling problem.
+ lyap_e_args = dict(min_tsep=10, emb_dim=5, matrix_dim=5, tau=dt, min_nb=8)
+ lyap_ex = nolds.lyap_e(data[:, 0], **lyap_e_args)
+ lyap_ey = nolds.lyap_e(data[:, 1], **lyap_e_args)
+ lyap_ez = nolds.lyap_e(data[:, 2], **lyap_e_args)
+ print("Expected Lyapunov exponent: ", lyap_expected)
+ print("lyap_r(x) : ", lyap_rx)
+ print("lyap_r(y) : ", lyap_ry)
+ print("lyap_r(z) : ", lyap_rz)
+ print("lyap_e(x) : ", lyap_ex)
+ print("lyap_e(y) : ", lyap_ey)
+ print("lyap_e(z) : ", lyap_ez)
+ print()
+
+ # Rationale for argument values:
+ # Start with moderate settings for lag and a large span of rvals.
+ # Increase emb_dim until you get a clear line in the debug plot
+ # Clip rvals to select only the linear part of the plot.
+ # Increase lag as long as it increases the output. Stop when the output becomes smaller
+ # (or when you feel that the lag is unreasonably large.)
+ rvals = nolds.logarithmic_r(1, np.e, 1.1) # determined experimentally
+ corr_dim_args = dict(emb_dim=5, lag=10, fit="poly", rvals=rvals)
+ cdx = nolds.corr_dim(data[:, 0], **corr_dim_args)
+ cdy = nolds.corr_dim(data[:, 1], **corr_dim_args)
+ cdz = nolds.corr_dim(data[:, 2], **corr_dim_args)
+ # reference Grassberger-Procaccia 1983
+ print("Expected correlation dimension: 2.05")
+ print("corr_dim(x) : ", cdx)
+ print("corr_dim(y) : ", cdy)
+ print("corr_dim(z) : ", cdz)
+ print()
+
+ # Rationale for argument values:
+ # Start with a large range of nvals.
+  # Reduce this range by cutting off the first few data points and then only
+  # keep the linear-ish looking part of the initial rise.
+ hurst_rs_args = dict(fit="poly", nvals=nolds.logarithmic_n(10, 70, 1.1))
+ hx = nolds.hurst_rs(data[:, 0], **hurst_rs_args)
+ hy = nolds.hurst_rs(data[:, 1], **hurst_rs_args)
+ hz = nolds.hurst_rs(data[:, 2], **hurst_rs_args)
+ # reference: Suyal 2009
+ print("Expected hurst exponent: 0.64 < H < 0.93")
+ print("hurst_rs(x) : ", hx)
+ print("hurst_rs(y) : ", hy)
+ print("hurst_rs(z) : ", hz)
+ print()
+
+ # reference: Wallot 2023, Table 1
+ # Rationale for argument values: Just follow paper
+  # NOTE: since DFA is quite fast and Wallot 2023 uses different initial
+  # values (x = y = z = 0.1 + e), a different data size (100k data points,
+  # 1000 runs), and does not report the step size, we use different data here
+ data_dfa = datasets.lorenz_euler(120000, 10, 28, 8/3.0, start=[0.1,0.1,0.1], dt=0.002)[20000:]
+ nvals = nolds.logarithmic_n(200, len(data_dfa)/8, 2**0.2)
+ dfa_args = dict(nvals=nvals, order=2, overlap=False, fit_exp="poly")
+ dx = nolds.dfa(data_dfa[:, 0], **dfa_args)
+ dy = nolds.dfa(data_dfa[:, 1], **dfa_args)
+ dz = nolds.dfa(data_dfa[:, 2], **dfa_args)
+ print("Expected hurst parameter: [1.008 ±0.016, 0.926 ±0.016, 0.650 ±0.22]")
+ print("dfa(x) : ", dx)
+ print("dfa(y) : ", dy)
+ print("dfa(z) : ", dz)
+ print()
+
+ # reference: Kaffashi 2008
+ # Rationale for argument values: Just follow paper.
+ sampen_args = dict(emb_dim=2, lag=1)
+ sx = nolds.sampen(data[:, 0], **sampen_args)
+ sy = nolds.sampen(data[:, 1], **sampen_args)
+ sz = nolds.sampen(data[:, 2], **sampen_args)
+ print("Expected sample entropy: [0.15, 0.15, 0.25]")
+ print("sampen(x): ", sx)
+ print("sampen(y): ", sy)
+ print("sampen(z): ", sz)
+
+
if __name__ == "__main__":
# run this with the following command:
# python -m nolds.examples lyapunov-logistic
import sys
+
def print_options():
print("options are:")
print(" lyapunov-logistic")
@@ -288,6 +631,10 @@ Source code for nolds.examples
print(" hurst-weron2")
print(" hurst-hist")
print(" hurst-nvals")
+ print(" sampen-tol")
+ print(" aste-line")
+ print(" hurst-mf-stock")
+ print(" lorenz")
if len(sys.argv) < 2:
print("please tell me which tests you want to run")
print_options()
@@ -304,16 +651,55 @@ Source code for nolds.examples
plot_hurst_hist()
elif sys.argv[1] == "hurst-nvals":
hurst_compare_nvals(datasets.brown72)
+ elif sys.argv[1] == "sampen-tol":
+ sampen_default_tolerance()
+ elif sys.argv[1] == "aste-line":
+ aste_line_fitting()
+ elif sys.argv[1] == "hurst-mf-stock":
+ hurst_mf_stock()
+ elif sys.argv[1] == "hurst-mf-barabasi2":
+ barabasi_1991_figure2()
+ elif sys.argv[1] == "hurst-mf-barabasi3":
+ barabasi_1991_figure3()
+ elif sys.argv[1] == "lorenz":
+ lorenz()
else:
print("i do not know any test of that name")
print_options()
diff --git a/_modules/nolds/measures.html b/_modules/nolds/measures.html
index a0211a2..734bc57 100644
--- a/_modules/nolds/measures.html
+++ b/_modules/nolds/measures.html
@@ -1,59 +1,56 @@
- nolds.measures — Nolds 0.5.1 documentation
+ nolds.measures — Nolds 0.6.0 documentation
Source code for nolds.measures
# -*- coding: utf-8 -*-
-from __future__ import (absolute_import, division,
+from __future__ import (absolute_import, division,
print_function, unicode_literals)
-from builtins import *
+from builtins import (
+ bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next,
+ oct, open, pow, round, super, filter, map, zip
+)
import numpy as np
import warnings
import math
+
def rowwise_chebyshev(x, y):
return np.max(np.abs(x - y), axis=1)
+
def rowwise_euclidean(x, y):
return np.sqrt(np.sum((x - y)**2, axis=1))
+
def poly_fit(x, y, degree, fit="RANSAC"):
# check if we can use RANSAC
if fit == "RANSAC":
@@ -95,7 +92,7 @@ Source code for nolds.measures
def delay_embedding(data, emb_dim, lag=1):
- """
+ """
Perform a time-delay embedding of a time series
Args:
@@ -124,8 +121,11 @@ Source code for nolds.measures
indices += np.arange(m).reshape((m, 1))
return data[indices]
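A worked micro-example of this embedding (editorial sketch; the number of rows follows from m = n - (emb_dim - 1) * lag):

    # sketch: delay embedding of [0, 1, 2, 3, 4, 5] with emb_dim=3, lag=2
    # index rows [0, 2, 4] and [1, 3, 5] yield two embedded vectors:
    # [[0 2 4]
    #  [1 3 5]]
    import numpy as np
    print(delay_embedding(np.arange(6), 3, lag=2))  # function defined above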
-[docs]def lyap_r_len(**kwargs):
- """
+
+
+[docs]
+def lyap_r_len(**kwargs):
+ """
Helper function that calculates the minimum number of data points required
to use lyap_r.
@@ -148,10 +148,14 @@ Source code for nolds.measures
min_len += kwargs['min_tsep'] * 2 + 1
return min_len
-[docs]def lyap_r(data, emb_dim=10, lag=None, min_tsep=None, tau=1, min_neighbors=20,
+
+
+
+[docs]
+def lyap_r(data, emb_dim=10, lag=None, min_tsep=None, tau=1, min_neighbors=20,
trajectory_len=20, fit="RANSAC", debug_plot=False, debug_data=False,
plot_file=None, fit_offset=0):
- """
+ """
Estimates the largest Lyapunov exponent using the algorithm of Rosenstein
et al. [lr_1]_.
@@ -216,6 +220,8 @@ Source code for nolds.measures
.. [lr_b] Shapour Mohammadi, "LYAPROSEN: MATLAB function to calculate
Lyapunov exponent",
url: https://ideas.repec.org/c/boc/bocode/t741502.html
+ .. [lr_c] Rainer Hegger, Holger Kantz, and Thomas Schreiber, "TISEAN 3.0.0 - Nonlinear Time Series Analysis",
+ url: https://www.pks.mpg.de/tisean/Tisean_3.0.0/docs/docs_c/lyap_r.html
Args:
data (iterable of float):
@@ -259,13 +265,13 @@ Source code for nolds.measures
a strong indicator for chaos)
(1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
- ``(ks, div_traj, poly)`` where ``ks`` are the x-values of the line fit,
+ ``(ks, div_traj, poly)`` where ``ks`` are the x-values of the line fit,
``div_traj`` are the y-values and ``poly`` are the line coefficients
(``[slope, intercept]``).
"""
# convert data to float to avoid overflow errors in rowwise_euclidean
- data = np.asarray(data, dtype="float32")
+ data = np.asarray(data, dtype=np.float64)
n = len(data)
max_tsep_factor = 0.25
if lag is None or min_tsep is None:
@@ -273,8 +279,11 @@ Source code for nolds.measures
f = np.fft.rfft(data, n * 2 - 1)
if min_tsep is None:
# calculate min_tsep as mean period (= 1 / mean frequency)
- mf = np.fft.rfftfreq(n * 2 - 1) * np.abs(f)
- mf = np.mean(mf[1:]) / np.sum(np.abs(f[1:]))
+ # to get the mean frequency, we weight the frequency buckets in the
+ # fft result by the absolute power in that bucket and then divide
+      # by the total power across all buckets to get a weighted mean
+      mf = np.fft.rfftfreq(n * 2 - 1) * np.abs(f)**2
+      mf = np.sum(mf[1:]) / np.sum(np.abs(f[1:])**2)
min_tsep = int(np.ceil(1.0 / mf))
if min_tsep > max_tsep_factor * n:
min_tsep = int(max_tsep_factor * n)
@@ -291,6 +300,7 @@ Source code for nolds.measures
acorr = np.roll(acorr, n - 1)
eps = acorr[n - 1] * (1 - 1.0 / np.e)
lag = 1
+
# small helper function to calculate resulting number of vectors for a
# given lag value
def nb_neighbors(lag_value):
@@ -300,7 +310,7 @@ Source code for nolds.measures
)
return max(0, n - min_len)
# find lag
- for i in range(1,n):
+ for i in range(1, n):
lag = i
if acorr[n - 1 + i] < eps or acorr[n - 1 - i] < eps:
break
@@ -333,7 +343,7 @@ Source code for nolds.measures
dists[i, max(0, i - min_tsep):i + min_tsep + 1] = float("inf")
# check that we have enough data points to continue
ntraj = m - trajectory_len + 1
- min_traj = min_tsep * 2 + 2 # in each row min_tsep + 1 disances are inf
+  min_traj = min_tsep * 2 + 2  # in each row min_tsep + 1 distances are inf
if ntraj <= 0:
msg = "Not enough data points. Need {} additional data points to follow " \
+ "a complete trajectory."
@@ -349,7 +359,7 @@ Source code for nolds.measures
# find nearest neighbors (exclude last columns, because these vectors cannot
# be followed in time for trajectory_len steps)
nb_idx = np.argmin(dists[:ntraj, :ntraj], axis=1)
-
+
# build divergence trajectory by averaging distances along the trajectory
# over all neighbor pairs
div_traj = np.zeros(trajectory_len, dtype=float)
@@ -377,15 +387,21 @@ Source code for nolds.measures
# normal line fitting
poly = poly_fit(ks[fit_offset:], div_traj[fit_offset:], 1, fit=fit)
if debug_plot:
- plot_reg(ks[fit_offset:], div_traj[fit_offset:], poly, "k", "log(d(k))", fname=plot_file)
+ plot_reg(
+ ks[fit_offset:], div_traj[fit_offset:],
+ poly, "k", "log(d(k))", fname=plot_file)
le = poly[0] / tau
if debug_data:
return (le, (ks, div_traj, poly))
else:
return le
-[docs]def lyap_e_len(**kwargs):
- """
+
+
+
+[docs]
+def lyap_e_len(**kwargs):
+ """
Helper function that calculates the minimum number of data points required
to use lyap_e.
@@ -411,9 +427,13 @@ Source code for nolds.measures
min_len += kwargs['min_nb']
return min_len
-[docs]def lyap_e(data, emb_dim=10, matrix_dim=4, min_nb=None, min_tsep=0, tau=1,
+
+
+
+[docs]
+def lyap_e(data, emb_dim=10, matrix_dim=4, min_nb=None, min_tsep=0, tau=1,
debug_plot=False, debug_data=False, plot_file=None):
- """
+ """
Estimates the Lyapunov exponents for the given data using the algorithm of
Eckmann et al. [le_1]_.
@@ -516,7 +536,8 @@ Source code for nolds.measures
Lyapunov exponents from the x iterations of R_i. The shape of this debug
data is (x, matrix_dim).
"""
- data = np.asarray(data)
+ # convert to float to avoid errors when using 'inf' as distance
+ data = np.asarray(data, dtype=np.float64)
n = len(data)
if (emb_dim - 1) % (matrix_dim - 1) != 0:
raise ValueError("emb_dim - 1 must be divisible by matrix_dim - 1!")
@@ -529,8 +550,8 @@ Source code for nolds.measures
emb_dim=emb_dim, matrix_dim=matrix_dim, min_nb=min_nb, min_tsep=min_tsep
)
if n < min_len:
- msg = "{} data points are not enough! For emb_dim = {}, matrix_dim = {}, " \
- + "min_tsep = {} and min_nb = {} you need at least {} data points " \
+ msg = "{} data points are not enough! For emb_dim = {}, matrix_dim = {}" \
+ + ", min_tsep = {} and min_nb = {} you need at least {} data points " \
+ "in your time series"
warnings.warn(
msg.format(n, emb_dim, matrix_dim, min_tsep, min_nb, min_len),
@@ -552,7 +573,7 @@ Source code for nolds.measures
+ "to have min_nb = {} neighbor candidates"
raise ValueError(msg.format(min_nb-len(orbit), min_nb))
old_Q = np.identity(matrix_dim)
- lexp = np.zeros(matrix_dim, dtype="float32")
+ lexp = np.zeros(matrix_dim, dtype=np.float64)
lexp_counts = np.zeros(lexp.shape)
debug_values = []
# TODO reduce number of points to visit?
@@ -618,11 +639,11 @@ Source code for nolds.measures
# x_j1+(d_M)m - x_i+(d_M)m
# x_j2+(d_M)m - x_i+(d_M)m
# ...
- if max(np.max(indices),i) + matrix_dim * m >= len(data):
+ if max(np.max(indices), i) + matrix_dim * m >= len(data):
assert len(data) < min_len
msg = "Not enough data points. Cannot follow orbit vector {} for " \
- + "{} (matrix_dim * m) time steps. Input must have at least length " \
- + "{}."
+ + "{} (matrix_dim * m) time steps. Input must have at least " \
+ + "length {}."
raise ValueError(msg.format(i, matrix_dim * m, min_len))
vec_beta = data[indices + matrix_dim * m] - data[i + matrix_dim * m]
@@ -653,7 +674,7 @@ Source code for nolds.measures
diag_R = np.diag(mat_R)
# filter zeros in mat_R (would lead to -infs)
idx = np.where(diag_R > 0)
- lexp_i = np.zeros(diag_R.shape, dtype="float32")
+ lexp_i = np.zeros(diag_R.shape, dtype=np.float64)
lexp_i[idx] = np.log(diag_R[idx])
lexp_i[np.where(diag_R == 0)] = np.inf
if debug_plot or debug_data:
@@ -678,6 +699,7 @@ Source code for nolds.measures
return lexp
+
def plot_dists(dists, tolerance, m, title=None, fname=None):
# local import to avoid dependency for non-debug use
import matplotlib.pyplot as plt
@@ -686,7 +708,7 @@ Source code for nolds.measures
dists_full = np.concatenate(dists)
ymax = len(dists_full) * 0.05
mean = np.mean(dists_full)
- std = np.std(dists_full)
+ std = np.std(dists_full, ddof=1)
rng = (0, mean + std * nstd)
i = 0
colors = ["green", "blue"]
@@ -709,9 +731,11 @@ Source code for nolds.measures
plt.close()
-[docs]def sampen(data, emb_dim=2, tolerance=None, dist=rowwise_chebyshev,
- debug_plot=False, debug_data=False, plot_file=None):
- """
+
+[docs]
+def sampen(data, emb_dim=2, tolerance=None, lag=1, dist=rowwise_chebyshev,
+ closed=False, debug_plot=False, debug_data=False, plot_file=None):
+ """
Computes the sample entropy of the given data.
Explanation of the sample entropy:
@@ -724,7 +748,7 @@ Source code for nolds.measures
Explanation of the algorithm:
The algorithm constructs all subsequences of length emb_dim
- [s_1, s_2, s_3, ...] and then counts each pair (s_i, s_j) with i != j
+ [s_1, s_1+lag, s_1+2*lag, ...] and then counts each pair (s_i, s_j) with i != j
where dist(s_i, s_j) < tolerance. The same process is repeated for all
subsequences of length emb_dim + 1. The sum of similar sequence pairs
with length emb_dim + 1 is divided by the sum of similar sequence pairs
@@ -750,11 +774,18 @@ Source code for nolds.measures
the embedding dimension (length of vectors to compare)
tolerance (float):
distance threshold for two template vectors to be considered equal
- (default: 0.2 * std(data))
+ (default: 0.2 * std(data) at emb_dim = 2, corrected for dimension effect
+ for other values of emb_dim)
+ lag (int):
+ delay for the delay embedding
dist (function (2d-array, 1d-array) -> 1d-array):
distance function used to calculate the distance between template
- vectors. Sampen is defined using ``rowwise_chebyshev``. You should only use
- something else, if you are sure that you need it.
+ vectors. Sampen is defined using ``rowwise_chebyshev``. You should only
+      use something else if you are sure that you need it.
+ closed (boolean):
+ if True, will check for vector pairs whose distance is in the closed
+ interval [0, r] (less or equal to r), otherwise the open interval
+ [0, r) (less than r) will be used
debug_plot (boolean):
if True, a histogram of the individual distances for m and m+1
debug_data (boolean):
@@ -768,14 +799,26 @@ Source code for nolds.measures
float:
the sample entropy of the data (negative logarithm of ratio between
similar template vectors of length emb_dim + 1 and emb_dim)
+ [c_m, c_m1]:
+ list of two floats: count of similar template vectors of length emb_dim
+ (c_m) and of length emb_dim + 1 (c_m1)
[float list, float list]:
- Lists of lists of the form ``[dists_m, dists_m1]`` containing the distances
- between template vectors for m (dists_m) and for m + 1 (dists_m1).
+ Lists of lists of the form ``[dists_m, dists_m1]`` containing the
+ distances between template vectors for m (dists_m)
+ and for m + 1 (dists_m1).
"""
data = np.asarray(data)
-
+
if tolerance is None:
- tolerance = 0.2 * np.std(data)
+ # the reasoning behind this default value is the following:
+ # 1. physionet uses the default values emb_dim = 2, tolerance = 0.2
+ # 2. the chebyshev distance rises logarithmically with increasing dimension
+ # 3. 0.5627 * np.log(emb_dim) + 1.3334 is the logarithmic trend line for
+ # the chebyshev distance of vectors sampled from a univariate normal
+ # distribution
+ # 4. 0.1164 is used as a factor to ensure that tolerance == std * 0.2 for
+ # emb_dim == 2
+ tolerance = np.std(data, ddof=1) * 0.1164 * (0.5627 * np.log(emb_dim) + 1.3334)
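+    # editorial check (assuming unit variance): for emb_dim = 2 the factor
+    # 0.1164 * (0.5627 * log(2) + 1.3334) is about 0.2006, matching the
+    # PhysioNet default of 0.2; for emb_dim = 4 it grows to about 0.246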
n = len(data)
# build matrix of "template vectors"
@@ -793,7 +836,7 @@ Source code for nolds.measures
# because this vector has no corresponding vector of length m+1 and thus does
# not count towards the conditional probability
# (otherwise first dimension would be n-emb_dim+1 and not n-emb_dim)
- tVecs = delay_embedding(np.asarray(data), emb_dim+1, lag=1)
+ tVecs = delay_embedding(np.asarray(data), emb_dim+1, lag=lag)
plot_data = []
counts = []
for m in [emb_dim, emb_dim + 1]:
@@ -804,26 +847,49 @@ Source code for nolds.measures
# successively calculate distances between each pair of template vectors
for i in range(len(tVecsM) - 1):
dsts = dist(tVecsM[i + 1:], tVecsM[i])
- if debug_plot:
+ if debug_plot or debug_data:
plot_data[-1].extend(dsts)
# count how many distances are smaller than the tolerance
- counts[-1] += np.sum(dsts < tolerance)
- if counts[1] == 0:
- # log would be infinite => cannot determine saen
- saen = np.inf
- else:
+ if closed:
+ counts[-1] += np.sum(dsts <= tolerance)
+ else:
+ counts[-1] += np.sum(dsts < tolerance)
+ if counts[0] > 0 and counts[1] > 0:
saen = -np.log(1.0 * counts[1] / counts[0])
+ else:
+ # log would be infinite or undefined => cannot determine saen
+ zcounts = []
+ if counts[0] == 0:
+ zcounts.append("emb_dim")
+ if counts[1] == 0:
+ zcounts.append("emb_dim + 1")
+ warnings.warn(
+ (
+ "Zero vectors are within tolerance for %s. " \
+ + "Consider raising the tolerance parameter to avoid %s result."
+ ) % (" and ".join(zcounts), "NaN" if len(zcounts) == 2 else "inf"),
+ RuntimeWarning
+ )
+ if counts[0] == 0 and counts[1] == 0:
+ saen = np.nan
+ elif counts[0] == 0:
+ saen = -np.inf
+ else:
+ saen = np.inf
if debug_plot:
plot_dists(plot_data, tolerance, m, title="sampEn = {:.3f}".format(saen),
fname=plot_file)
if debug_data:
- return (saen, plot_data)
+ return (saen, counts, plot_data)
else:
return saen
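A usage sketch (editorial), mirroring the call in nolds.examples.sampen_default_tolerance:

    # sketch: sample entropy of a chaotic logistic-map series
    from nolds import datasets, measures
    data = list(datasets.logistic_map(0.34, 1000, r=3.9))
    print(measures.sampen(data, emb_dim=2))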
-[docs]def binary_n(total_N, min_n=50):
- """
+
+
+[docs]
+def binary_n(total_N, min_n=50):
+ """
Creates a list of values by successively halving the total length total_N
until the resulting value is less than min_n.
@@ -845,8 +911,11 @@ Source code for nolds.measures
return [int(np.floor(1.0 * total_N / (2**i))) for i in range(1, max_exp + 1)]
-[docs]def logarithmic_n(min_n, max_n, factor):
- """
+
+
+[docs]
+def logarithmic_n(min_n, max_n, factor):
+ """
Creates a list of values by successively multiplying a minimum value min_n by
a factor > 1 until a maximum value max_n is reached.
@@ -878,8 +947,12 @@ Source code for nolds.measures
ns.append(n)
return ns
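A usage sketch (editorial; the same nvals choice that nolds.examples.lorenz uses for hurst_rs):

    # sketch: n values between 10 and 70, a factor of 1.1 apart on the log scale
    from nolds import measures
    nvals = measures.logarithmic_n(10, 70, 1.1)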
-[docs]def logmid_n(max_n, ratio=1/4.0, nsteps=15):
- """
+
+
+
+[docs]
+def logmid_n(max_n, ratio=1/4.0, nsteps=15):
+ """
Creates an array of integers that lie evenly spaced in the "middle" of the
logarithmic scale from 0 to log(max_n).
@@ -915,8 +988,12 @@ Source code for nolds.measures
nvals = np.round(np.exp(midrange)).astype("int32")
return np.unique(nvals)
-[docs]def logarithmic_r(min_n, max_n, factor):
- """
+
+
+
+[docs]
+def logarithmic_r(min_n, max_n, factor):
+ """
Creates a list of values by successively multiplying a minimum value min_n by
a factor > 1 until a maximum value max_n is reached.
@@ -938,8 +1015,11 @@ Source code for nolds.measures
return [min_n * (factor ** i) for i in range(max_i + 1)]
-[docs]def expected_rs(n):
- """
+
+
+[docs]
+def expected_rs(n):
+ """
Calculates the expected (R/S)_n for white noise for a given n.
This is used as a correction factor in the function hurst_rs. It uses the
@@ -954,7 +1034,7 @@ Source code for nolds.measures
expected (R/S)_n for white noise
"""
front = (n - 0.5) / n
- i = np.arange(1,n)
+ i = np.arange(1, n)
back = np.sum(np.sqrt((n - i) / i))
if n <= 340:
middle = math.gamma((n-1) * 0.5) / math.sqrt(math.pi) / math.gamma(n * 0.5)
@@ -962,8 +1042,12 @@ Source code for nolds.measures
middle = 1.0 / math.sqrt(n * math.pi * 0.5)
return front * middle * back
-[docs]def expected_h(nvals, fit="RANSAC"):
- """
+
+
+
+[docs]
+def expected_h(nvals, fit="RANSAC"):
+ """
Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.
@@ -986,8 +1070,9 @@ Source code for nolds.measures
return poly[0]
+
def rs(data, n, unbiased=True):
- """
+ """
Calculates an individual R/S value in the rescaled range approach for
a given n.
@@ -1013,7 +1098,7 @@ Source code for nolds.measures
"""
data = np.asarray(data)
total_N = len(data)
- m = total_N // n # number of sequences
+ m = total_N // n # number of sequences
# cut values at the end of data to make the array divisible by n
data = data[:total_N - (total_N % n)]
# split remaining data into subsequences of length n
@@ -1041,7 +1126,7 @@ Source code for nolds.measures
return np.mean(r / s)
-def plot_histogram_matrix(data, name, fname=None):
+def plot_histogram_matrix(data, name, bin_range="3sigma", fname=None):
# local import to avoid dependency for non-debug use
import matplotlib.pyplot as plt
nhists = len(data[0])
@@ -1052,10 +1137,16 @@ Source code for nolds.measures
for i in range(nhists):
plt.subplot(nrows, nrows, i + 1)
absmax = max(abs(np.max(data[:, i])), abs(np.min(data[:, i])))
- rng = (-absmax, absmax)
+ if bin_range == "absmax":
+ rng = (-absmax, absmax)
+ elif bin_range.endswith("sigma"):
+ n = int(bin_range[:-len("sigma")])
+      mu = np.mean(data[:, i])
+ sigma = np.std(data[:, i], ddof=1)
+ rng = (mu - n * sigma, mu + n * sigma)
h, bins = np.histogram(data[:, i], nbins, rng)
bin_width = bins[1] - bins[0]
- h = h.astype("float32") / np.sum(h)
+ h = h.astype(np.float64) / np.sum(h)
plt.bar(bins[:-1], h, bin_width)
plt.axvline(np.mean(data[:, i]), color="red")
plt.ylim(ylim)
@@ -1069,10 +1160,10 @@ Source code for nolds.measures
def plot_reg(xvals, yvals, poly, x_label="x", y_label="y", data_label="data",
reg_label="regression line", fname=None):
- """
+ """
Helper function to plot trend lines for line-fitting approaches. This
- function will show a plot through ``plt.show()`` and close it after the window
- has been closed by the user.
+ function will show a plot through ``plt.show()`` and close it after the
+ window has been closed by the user.
Args:
xvals (list/array of float):
@@ -1109,9 +1200,67 @@ Source code for nolds.measures
plt.close()
-[docs]def hurst_rs(data, nvals=None, fit="RANSAC", debug_plot=False,
+def plot_reg_tiled(xvals, yvals, polys, x_label="x", y_label="y",
+ data_labels=None, reg_labels=None, fname=None,
+ columns=None):
+ """
+  Helper function that plots several data series and their regression lines
+  in a tiled grid of subplots (used for debug plots).
+ """
+ # local import to avoid dependency for non-debug use
+ import matplotlib.pyplot as plt
+ max_span = max([np.max(y) - np.min(y) for y in yvals])
+ means = [np.mean(y) for y in yvals]
+ if columns is None:
+ columns = min(4, int(np.ceil(np.sqrt(len(xvals)))))
+ if data_labels is None:
+ data_labels = ["data"] * len(xvals)
+ if reg_labels is None:
+ reg_labels = ["regression line"] * len(xvals)
+ for i in range(len(xvals)):
+ plt.subplot(int(np.ceil(len(xvals) / columns)), columns, i + 1)
+ plt.plot(xvals[i], yvals[i], "bo", label=data_labels[i])
+ if not (polys is None):
+ plt.plot(xvals[i], np.polyval(polys[i], xvals[i]), "r-", label=reg_labels[i])
+ plt.xlabel(x_label)
+ plt.ylabel(y_label)
+ plt.ylim(means[i] - max_span / 2, means[i] + max_span / 2)
+ plt.legend(loc="best")
+ if fname is None:
+ plt.show()
+ else:
+ plt.savefig(fname)
+ plt.close()
+
+
+def plot_reg_multiple(xvals, yvals, polys, x_label="x", y_label="y",
+ data_labels=None, reg_labels=None, fname=None):
+ """
+  Helper function that plots several data series and their regression lines
+  together in a single plot (used for debug plots).
+ """
+ import matplotlib.pyplot as plt
+ if data_labels is None:
+ data_labels = ["data"] * len(xvals)
+ if reg_labels is None:
+ reg_labels = ["regression line"] * len(xvals)
+ for i in range(len(xvals)):
+ plt.plot(xvals[i], yvals[i], "+", label=data_labels[i])
+ if not (polys is None):
+ plt.plot(xvals[i], np.polyval(polys[i], xvals[i]), label=reg_labels[i])
+ plt.xlabel(x_label)
+ plt.ylabel(y_label)
+ plt.legend(loc="best")
+ if fname is None:
+ plt.show()
+ else:
+ plt.savefig(fname)
+ plt.close()
+
+
+
+[docs]
+def hurst_rs(data, nvals=None, fit="RANSAC", debug_plot=False,
debug_data=False, plot_file=None, corrected=True, unbiased=True):
- """
+ """
Calculates the Hurst exponent by a standard rescaled range (R/S) approach.
Explanation of Hurst exponent:
@@ -1125,9 +1274,9 @@ Source code for nolds.measures
capacity that would be required to keep the discharge steady at its mean
value.
- To do so, we first substract the mean over all x_i from the individual
+ To do so, we first subtract the mean over all x_i from the individual
x_i to obtain the departures x'_i from the mean for each year i. As the
- excess or deficit in discharge always carrys over from year i to year i+1,
+ excess or deficit in discharge always carries over from year i to year i+1,
we need to examine the cumulative sum of x'_i, denoted by y_i. This
cumulative sum represents the filling of our hypothetical storage. If the
sum is above 0, we are storing excess discharge from the river, if it is
@@ -1180,16 +1329,16 @@ Source code for nolds.measures
of the length of the sequence as n. The length is reduced by at
most 1% to find the value that has the most divisors.
- * The "Simple R/S" estimate is just log((R/S)_n) / log(n) for
+ * The "Simple R/S" estimate is just log((R/S)_n) / log(n) for
n = N.
* The "theoretical Hurst exponent" is the value that would be
expected of an uncorrected rescaled range approach for random
noise of the size of the input data.
* The "empirical Hurst exponent" is the uncorrected Hurst exponent
obtained by the rescaled range approach.
- * The "corrected empirical Hurst exponent" is the Anis-Lloyd-Peters
- corrected Hurst exponent, but with sqrt(1/2 * pi * n) added to
- the (R/S)_n before the log.
+ * The "corrected empirical Hurst exponent" is the
+ Anis-Lloyd-Peters corrected Hurst exponent, but with
+ sqrt(1/2 * pi * n) added to the (R/S)_n before the log.
* The "corrected R over S Hurst exponent" uses the R-function "lm"
    instead of pracma's own "polyfit" and uses n = N/2, N/4, N/8, ...
by successively halving the subsequences (which means that some
@@ -1212,8 +1361,6 @@ Source code for nolds.measures
implementation.
.. [h_c] Bill Davidson, "Hurst exponent",
url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent
- .. [h_d] Tomaso Aste, "Generalized Hurst exponent",
- url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent
Args:
data (array-like of float):
@@ -1226,10 +1373,10 @@ Source code for nolds.measures
Generally, the choice for n is a trade-off between the length and the
number of the subsequences that are used for the calculation of the
- (R/S)_n. Very low values of n lead to high variance in the ``r`` and ``s``
- while very high values may leave too few subsequences that the mean along
- them is still meaningful. Logarithmic spacing makes sense, because it
- translates to even spacing in the log-log-plot.
+ (R/S)_n. Very low values of n lead to high variance in the ``r`` and
+ ``s`` while very high values may leave too few subsequences that the mean
+ along them is still meaningful. Logarithmic spacing makes sense, because
+ it translates to even spacing in the log-log-plot.
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
@@ -1260,8 +1407,8 @@ Source code for nolds.measures
long-range correlations)
(1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
- ``(nvals, rsvals, poly)`` where ``nvals`` are the values used for log(n),
- ``rsvals`` are the corresponding log((R/S)_n) and ``poly`` are the line
+ ``(nvals, rsvals, poly)`` where ``nvals`` are the values used for log(n),
+ ``rsvals`` are the corresponding log((R/S)_n) and ``poly`` are the line
coefficients (``[slope, intercept]``)
"""
data = np.asarray(data)
@@ -1282,7 +1429,10 @@ Source code for nolds.measures
if len(rsvals) == 0:
poly = [np.nan, np.nan]
if debug_plot:
- warnings.warn("Cannot display debug plot, all (R/S)_n are NaN")
+ warnings.warn(
+ "Cannot display debug plot, all (R/S)_n are NaN",
+ RuntimeWarning
+ )
else:
# fit a line to the logarithm of the obtained (R/S)_n
xvals = np.log(nvals)
@@ -1301,11 +1451,470 @@ Source code for nolds.measures
else:
return h
-# TODO implement generalized hurst exponent H_q
-[docs]def corr_dim(data, emb_dim, rvals=None, dist=rowwise_euclidean,
+# TODO implement MFDFA as second (more reliable) measure for multifractality
+# NOTE: probably not needed, since mfhurst_b is already pretty reliable
+
+
+
+[docs]
+def mfhurst_b(data, qvals=None, dists=None, fit='poly',
+ debug_plot=False, debug_data=False, plot_file=None):
+ """
+ Calculates the Generalized Hurst Exponent H_q for different q according to
+ A.-L. Barabási and T. Vicsek.
+
+ Explanation of the Generalized Hurst Exponent:
+ The Generalized Hurst Exponent (GHE, H_q or H(q)) can (as the name implies)
+ be seen as a generalization of the Hurst exponent for data series with
+ multifractal properties. Its origins, however, are not directly related
+ to Hurst's rescaled range approach, but to the definition of self-affine
+ functions.
+
+ A single-valued self-affine function h by definition satisfies the relation
+
+ h(x) ~= lambda^(-H) h(lambda x)
+
+ for any positive real valued lambda and some positive real valued exponent
+ H, which is called the Hurst, Hölder, Hurst-Hölder or roughness exponent
+ in the literature. In other words you can view lambda as a scaling factor
+ or "step size". With lambda < 1 we decrease the step size and zoom into our
+ function. In this case lambda^(-H) becomes greater than one, meaning that
+ h(lambda x) looks similar to a smaller version of h(x). With lambda > 1 we
+ zoom out and get lambda^(-H) < 1.
+
+ To calculate H, you can use the height-height correlation function (also
+ called autocorrelation) c(d) = <(h(x) - h(x + d))^2>_x where <...>_x
+ denotes the expected value over x. Here, the aforementioned self-affine
+ property is equivalent to c(d) ~ d^(2H). You can also think of d as a step
+ size. Increasing or decreasing d from 1 to some y is the same as setting
+ lambda = y: It increases or decreases the scale of the function by a factor
+ of 1/y^(-H) = y^H. Therefore the squared differences will be proportional
+ to y^(2H).
+
+ A.-L. Barabási and T. Vicsek extended this notion to an infinite hierarchy
+ of exponents H_q for the qth-order correlation function with
+
+ c_q(d) = <(h(x) - h(x + d))^q>_x ~ d^(q H_q)
+
+ With q = 1 you get a value H_1 that is closely related to the normal Hurst
+ exponent, but with different q you either get a constant value H_q = H_0
+ independent of q, which indicates that the function has no multifractal
+ properties, or different H_q, which is a sign for multifractal behavior.
+
+ T. Di Matteo, T. Aste and M. M. Dacorogna applied this technique to
+ financial data series and gave it the name "Generalized Hurst Exponent".
+
+ Explanation of the Algorithm:
+ Curiously, I could not find any algorithmic description of how to calculate
+ H_q in the literature. Researchers seem to just imply that you can obtain
+ the exponent by a line fitting algorithm in a log-log plot, but they do not
+ talk about the actual procedure or the required parameters.
+
+ Essentially, we can calculate c_q(d) of a discrete evenly sampled time
+ series Y = [y_0, y_1, y_2, ... y_(N-1)] by taking the absolute differences
+ [|y_0 - y_d|, |y_1 - y_(d+1)|, ..., |y_(N-d-1) - y_(N-1)|], raising them to
+ the qth power and taking the mean.
+
+ Now we take the logarithm on both sides of our relation c_q(d) ~ d^(q H_q)
+ and get
+
+ log(c_q(d)) ~ log(d) * q H_q
+
+ So in other words if we plot log(c_q(d)) against log(d) for several d we
+ should get a straight line with slope q H_q. This enables us to use a
+ linear least squares algorithm to obtain H_q.
+
+ Note that we consider x as a discrete variable in the range 0 <= x < N.
+ We can do this, because the actual sampling rate of our data series does
+ not alter the result. After taking the logarithm any scaling factor delta_x
+ would only result in an additive term since
+ log(delta_x * x) = log(x) + log(delta_x) and we only care about the slope
+ of the line and not the intercept.
+
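+ As a minimal sketch (an illustration only, not the full implementation
+ below), the estimation of a single H_q could look roughly like this,
+ given a NumPy array ``data`` and the hypothetical helper name
+ ``ghe_sketch``:
+
+   import numpy as np
+
+   def ghe_sketch(data, q=1, dists=(1, 2, 3, 5, 8, 12, 18)):
+     # height-height correlation c_q(d) for each distance d
+     cq = [np.mean(np.abs(data[:-d] - data[d:]) ** q) for d in dists]
+     # the slope of log(c_q(d)) vs log(d) estimates q * H_q
+     return np.polyfit(np.log(dists), np.log(cq), 1)[0] / q
+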
+ References:
+ .. [mh_1] A.-L. Barabási and T. Vicsek, “Multifractality of self-affine
+ fractals,” Physical Review A, vol. 44, no. 4, pp. 2730–2733, 1991.
+
+ Args:
+ data (array-like of float):
+ time series of data points (should be evenly sampled)
+
+ Kwargs:
+ qvals (iterable of float or int):
+ values of q for which H_q should be calculated (default: [1])
+ dists (iterable of int):
+ distances for which the height-height correlation should be calculated
+ (determines the x-coordinates in the log-log plot)
+ default: logarithmic_n(1, max(20, 0.02 * len(data)), 1.5) to ensure
+ even spacing on the logarithmic axis
+ fit (str):
+ the fitting method to use for the line fit, either 'poly' for normal
+ least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
+ is more robust to outliers
+ debug_plot (boolean):
+ if True, a simple plot of the final line-fitting step will be shown
+ debug_data (boolean):
+ if True, debugging data will be returned alongside the result
+ plot_file (str):
+ if debug_plot is True and plot_file is not None, the plot will be saved
+ under the given file name instead of directly showing it through
+ ``plt.show()``
+
+ Returns:
+ array of float:
+ list of H_q for every q given in ``qvals``
+ (1d-vector, 2d-vector, 2d-vector):
+ only present if debug_data is True: debug data of the form
+ ``(xvals, yvals, poly)`` where ``xvals`` is the logarithm of ``dists``,
+ ``yvals`` are the logarithms of the corresponding height-height-
+ correlations for each distance (first dimension) and each q
+ (second dimension) in the shape len(dists) x len(qvals) and ``poly`` are
+ the line coefficients (``[slope, intercept]``) for each q in the shape
+ len(qvals) x 2.
+ """
+ # transform to array if necessary
+ data = np.asarray(data, dtype=np.float64)
+ if qvals is None:
+ # actual default parameter would introduce shared list
+ # see: http://pylint-messages.wikidot.com/messages:w0102
+ qvals = [1]
+ if dists is None:
+ dists = logarithmic_n(1, max(20, 0.02 * len(data)), 1.5)
+ dists = np.asarray(dists)
+ if len(data) < 60:
+ warnings.warn(
+ "H(q) is not reliable for small time series ({} < 60)".format(len(data))
+ )
+
+ def hhcorr(d, q):
+ diffs = np.abs(data[:-d] - data[d:])
+ diffs = diffs[np.where(diffs > 0)]
+ return np.mean(diffs ** q)
+
+ # calculate height-height correlations
+ corrvals = [hhcorr(d, q) for d in dists for q in qvals]
+ corrvals = np.array(corrvals, dtype=np.float64)
+ corrvals = corrvals.reshape(len(dists), len(qvals))
+
+ # line fitting
+ xvals = np.log(dists)
+ yvals = np.log(corrvals)
+ polys = [
+ poly_fit(xvals, yvals[:, qi], 1, fit=fit)
+ for qi in range(len(qvals))
+ ]
+ H = np.array(polys)[:, 0] / qvals
+ if debug_plot:
+ plot_reg_multiple(
+ [xvals] * len(qvals),
+ [yvals[:, qi] / qvals[qi] for qi in range(len(qvals))],
+ [p / q for p, q in zip(polys, qvals)],
+ x_label="log(x)", y_label="$\\log(c_q(x)) / q$",
+ data_labels=["q = %d" % q for q in qvals],
+ reg_labels=["reg. line (H = {:.3f})".format(h) for h in H],
+ fname=plot_file
+ )
+ if debug_data:
+ return H, (xvals, yvals, polys)
+ else:
+ return H
+
+
+
+def _genhurst(S, q):
+ """
+ Computes the generalized Hurst exponent H_q for time series S.
+
+ This function should not be used. It is only kept here to demonstrate that
+ ``mfhurst_dm`` is implemented correctly. You can use the following call to
+ get the exact same result:
+
+ ``mfhurst_dm(S, [q])``
+
+ Reference code:
+ .. [gh_a] Tomaso Aste, "Generalized Hurst exponent",
+ url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent
+ .. [gh_b] Peter Rupprecht, "GenHurst",
+ url: https://github.com/PTRRupprecht/GenHurst
+
+ Below you can find the original documentation by T. Aste:
+
+ ####################################
+ # Calculates the generalized Hurst exponent H(q) from the scaling
+ # of the renormalized q-moments of the distribution
+ #
+ # <|x(t+r)-x(t)|^q>/<x(t)^q> ~ r^[qH(q)]
+ #
+ ####################################
+ # H = genhurst(S,q)
+ # S is 1xT data series (T>50 recommended)
+ # calculates H, specifies the exponent q
+ #
+ # example:
+ # generalized Hurst exponent for a random vector
+ # H=genhurst(np.random.rand(10000,1),3)
+ #
+ ####################################
+ # for the generalized Hurst exponent method please refer to:
+ #
+ # T. Di Matteo et al. Physica A 324 (2003) 183-188
+ # T. Di Matteo et al. Journal of Banking & Finance 29 (2005) 827-851
+ # T. Di Matteo Quantitative Finance, 7 (2007) 21-36
+ #
+ ####################################
+ ## written in Matlab : Tomaso Aste, 30/01/2013 ##
+ ## translated to Python (3.6) : Peter Rupprecht, p.t.r.rupprecht (AT) gmail.com, 25/05/2017 ##
+ ## formatting and datatype fixes : Christopher Schölzel, 17/02/2019 ##
+ """
+ L = len(S)
+ if L < 100:
+ warnings.warn('Data series very short!')
+ H = np.zeros((len(range(5, 20)), 1))
+ k = 0
+
+ for Tmax in range(5, 20):
+
+ x = np.arange(1, Tmax+1, 1)
+ mcord = np.zeros((Tmax, 1))
+
+ for tt in range(1, Tmax+1):
+ dV = S[np.arange(tt, L, tt)] - S[np.arange(tt, L, tt)-tt]
+ VV = S[np.arange(tt, L+tt, tt)-tt]
+ N = len(dV) + 1
+ X = np.arange(1, N+1, dtype=np.float64)
+ Y = VV
+ mx = np.sum(X)/N
+ SSxx = np.sum(X**2) - N*mx**2
+ my = np.sum(Y)/N
+ SSxy = np.sum(np.multiply(X, Y)) - N*mx*my
+ cc1 = SSxy/SSxx
+ cc2 = my - cc1*mx
+ ddVd = dV - cc1
+ VVVd = VV - np.multiply(cc1, np.arange(1, N+1, dtype=np.float64)) \
+ - cc2
+ mcord[tt-1] = np.mean(np.abs(ddVd)**q)/np.mean(np.abs(VVVd)**q)
+ mx = np.mean(np.log10(x))
+ SSxx = np.sum(np.log10(x)**2) - Tmax*mx**2
+ my = np.mean(np.log10(mcord))
+ SSxy = np.sum(
+ np.multiply(
+ np.log10(x), np.transpose(np.log10(mcord))
+ )
+ ) - Tmax*mx*my
+ H[k] = SSxy/SSxx
+ k = k + 1
+ mH = np.mean(H)/q
+
+ return mH
+
+
+def _aste_line_fit(x, y):
+ """
+ Simple linear regression with ordinary least squares
+ https://en.wikipedia.org/wiki/Simple_linear_regression
+
+ NOTE: this function is left here to demonstrate the correctness of
+ T. Aste's MATLAB code used for ``mfhurst_dm``. You can get the same
+ results with a call to ``np.polyfit(x, y, 1)[::-1]``.
+ """
+ # convert to float to avoid integer overflow problems
+ x = np.asarray(x, dtype=np.float64)
+ y = np.asarray(y, dtype=np.float64)
+ N = len(x)
+ mx = np.mean(x)
+ my = np.mean(y)
+ # calculate the variance in x
+ # sum((x - mx) ^ 2) = sum(x ^ 2) - 2 * sum(x * mx) + N * mx ^ 2
+ # = sum(x ^ 2) - 2 * mx * sum(x) + N * mx ^ 2
+ # = sum(x ^ 2) - 2 * mx * N * mx + N * mx ^ 2
+ # = sum(x ^ 2) - N * mx ^ 2
+ var = np.sum(x ** 2) - N * mx * mx
+ # covariance of x and y
+ # sum((x - mx) * (y - my))
+ # = sum(xy) - sum(mx * y) - sum(my * x) + N * mx * my
+ # = sum(xy) - mx * sum(y) - my * sum(x) + N * mx * my
+ # = sum(xy) - mx * my * N - my * mx * N + N * mx * my
+ # = sum(xy) - N * mx * my
+ # NOTE: T. Aste's code is a little confusing here
+ # X = 1:N;
+ # Y = S(((tt+1):tt:(L+tt))-tt)';
+ # ...
+ # SSxy = sum(X.*Y) - N*mx*my;
+ # Here, Y is transposed and the multiplication for SSxy uses .* instead of *.
+ # This suggests that we have a matrix multiplication with (possible)
+ # broadcasting. If X was an array and not a range, we would have a NxN array
+ # as a result since size(X) = [1, N] and size(Y) = [N, 1]. Ranges behave
+ # differently in MATLAB and this is the only reason why we get the correct
+ # result here.
+ cov = np.sum(x * y) - N * mx * my
+ # calculate slope and intercept (this is correct again)
+ slope = cov / var
+ intercept = my - slope * mx
+ return [intercept, slope]
+
+
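+# Hedged sanity check (illustration only, not part of nolds): the
+# coefficients returned by _aste_line_fit should match NumPy's ordinary
+# least squares fit up to floating point error.
+#
+#   x = np.arange(10, dtype=np.float64)
+#   y = 2.0 * x + 1.0 + np.random.normal(0, 0.1, 10)
+#   assert np.allclose(_aste_line_fit(x, y), np.polyfit(x, y, 1)[::-1])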
+
+[docs]
+def mfhurst_dm(data, qvals=None, max_dists=range(5, 20), detrend=True,
+ fit="poly", debug_plot=False, debug_data=False, plot_file=None):
+ """
+ Calculates the Generalized Hurst Exponent H_q for different q according to
+ the MATLAB code of Tomaso Aste - one of the authors that introduced this
+ measure.
+
+ Explanation of the General Hurst Exponent:
+ See mfhurst_b.
+
+ Warning: I do not recommend using this function unless you want to
+ reproduce examples from Di Matteo et al. From my experiments and a
+ critical code analysis it seems that mfhurst_b should provide more robust
+ results.
+
+ The design choices that make mfhurst_dm different from mfhurst_b are the
+ following:
+
+ - By default, a linear trend is removed from the data. This can be sensible
+ in some application areas (such as stock market analysis), but I think
+ this should be an additional preprocessing step and not part of this
+ algorithm.
+ - In the calculation of the height-height correlations, the differences
+ (h(x) - h(x + d)) are not calculated for every possible x from 0 to N-d-1,
+ but instead d is used as a step size for x. I see no justification for
+ this choice. It makes the algorithm run faster, but it also takes away
+ a lot of statistical robustness, especially for large values of d.
+ This effect can be clearly seen when setting `debug_plot` to `True`.
+ - The algorithm uses a linear scale for the distance values d = 1, 2, 3,
+ ..., tau_max. This is counterintuitive, since we later plot log(d)
+ against log(c_q(d)). A linear scale will have a bias towards larger
+ values in the logarithmic scale. A logarithmic scale for d seems to be
+ a more natural fit. If low values of d yield statistically unstable
+ results, they should simply be omitted.
+ - The algorithm tests multiple values for tau_max, which is the maximum
+ distance that will be calculated. In [mhd_1]_ the authors state that this
+ is done to test the robustness of the approach. However, taking the
+ mean of several runs with different tau_max will not produce any more
+ information than performing one run with the largest tau_max. Instead
+ it will only introduce a bias towards low values for d.
+
+ References:
+ .. [mhd_1] T. Di Matteo, T. Aste, and M. M. Dacorogna, “Scaling behaviors
+ in differently developed markets,” Physica A: Statistical Mechanics
+ and its Applications, vol. 324, no. 1–2, pp. 183–188, 2003.
+
+ Reference code:
+ .. [mhd_a] Tomaso Aste, "Generalized Hurst exponent",
+ url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent
+
+ Args:
+ data (1d-vector of float):
+ input data (should be evenly sampled)
+
+ Kwargs:
+ qvals (1d-vector of float):
+ values of q for which H_q should be calculated (default: [1])
+ max_dists (1d-vector of int):
+ different values to test for tau_max, the maximum value for the distance
+ d. The resulting H_q will be a mean of all H_q calculated with tau_max
+ = max_dists[0], max_dists[1], ... .
+ detrend (boolean):
+ if True, a linear trend will be removed from the data before H_q will
+ be calculated
+ fit (str):
+ the fitting method to use for the line fit, either 'poly' for normal
+ least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
+ is more robust to outliers
+ debug_plot (boolean):
+ if True, a simple plot of the final line-fitting step will be shown
+ debug_data (boolean):
+ if True, debugging data will be returned alongside the result
+ plot_file (str):
+ if debug_plot is True and plot_file is not None, the plot will be saved
+ under the given file name instead of directly showing it through
+ ``plt.show()``
+
+ Returns:
+ array of float:
+ array of mH_q for every q given in ``qvals`` where mH_q is the mean of
+ all H_q calculated for different max distances in max_dists.
+ array of float:
+ array of standard deviations sH_q for each mH_q returned
+ (1d-vector, 2d-vector, 2d-vector):
+ only present if debug_data is True: debug data of the form
+ ``(xvals, yvals, poly)`` where ``xvals`` are the logarithms of the
+ distances d, ``yvals`` are the logarithms of the corresponding
+ height-height correlations for each distance (first dimension) and each q
+ (second dimension) in the shape max(max_dists) x len(qvals) and ``poly``
+ are the line coefficients (``[slope, intercept]``) for each q in the
+ shape len(qvals) x 2.
+ """
+ # transform to array if necessary
+ data = np.asarray(data)
+ if qvals is None:
+ # actual default parameter would introduce shared list
+ # see: http://pylint-messages.wikidot.com/messages:w0102
+ qvals = [1]
+ if len(data) < 60:
+ warnings.warn(
+ "H(q) is not reliable for small time series ({} < 60)".format(len(data))
+ )
+ max_max_dist = np.max(max_dists)
+ hhcorr = []
+ # NOTE: I don't think it's a good idea to use a linear scale for the distance
+ # values. Our fit is in logarithmic space, so this will place more weight on
+ # the higher distances. This is not bad per se, but if you think that the
+ # first values are unreliable, it would be better to skip them altogether.
+ for dist in range(1, max_max_dist+1):
+ # NOTE: I don't think applying a step size to the input data is reasonable.
+ # I cannot find any justification for this in the papers, and it reduces
+ # the number of points that we can use to make our mean statistically stable.
+ step_size = dist
+ stepdata = data[::step_size]
+ if detrend:
+ stepdata = detrend_data(stepdata, order=1)
+ diffs = stepdata[1:] - stepdata[:-1]
+ hhcorr.append([
+ np.mean(np.abs(diffs) ** q) / np.mean(np.abs(stepdata) ** q)
+ for q in qvals
+ ])
+ hhcorr = np.array(hhcorr, dtype=np.float64)
+ xvals = np.log(np.arange(1, max_max_dist+1))
+ yvals = np.log(hhcorr)
+ # NOTE: Using several maximum distances seems to be a strange way to
+ # introduce stability, since it only places emphasis on the lower distance
+ # ranges and does not introduce any new information.
+ H = np.array([
+ poly_fit(xvals[:md], yvals[:md, qi], 1, fit=fit)[0]
+ for qi in range(len(qvals))
+ for md in max_dists
+ ], dtype=np.float64).reshape(len(qvals), len(max_dists))
+ # NOTE: polys is also needed for the debug_data return value below, so it
+ # is calculated outside of the debug_plot branch
+ polys = [
+ np.array(poly_fit(xvals, yvals[:, qi], 1)) / qvals[qi]
+ for qi in range(len(qvals))
+ ]
+ if debug_plot:
+ plot_reg_multiple(
+ [xvals] * len(qvals),
+ [yvals[:, qi] / qvals[qi] for qi in range(len(qvals))],
+ polys,
+ x_label="log(x)", y_label="$\\log(c_q(x)) / q$",
+ data_labels=["q = %d" % q for q in qvals],
+ reg_labels=["reg. line (H = {:.3f})".format(h) for h in H[:, -1] / qvals],
+ fname=plot_file
+ )
+ mH = np.mean(H, axis=1) / qvals
+ sH = np.std(H, axis=1) / qvals
+ if debug_data:
+ return [mH, sH, (xvals, yvals, polys)]
+ else:
+ return [mH, sH]
+
+
+
+
+[docs]
+def corr_dim(data, emb_dim, lag=1, rvals=None, dist=rowwise_euclidean,
fit="RANSAC", debug_plot=False, debug_data=False, plot_file=None):
- """
+ """
Calculates the correlation dimension with the Grassberger-Procaccia algorithm
Explanation of correlation dimension:
@@ -1335,9 +1944,12 @@ Source code for nolds.measures
This version of the algorithm is created for one-dimensional (scalar) time
series. Therefore, before calculating C(r), a delay embedding of the time
series is performed to yield emb_dim dimensional vectors
- Y_i = [X_i, X_(i+1), X_(i+2), ... X_(i+embd_dim-1)]. Choosing a higher
- value for emb_dim allows to reconstruct higher dimensional dynamics and
- avoids "systematic errors due to corrections to scaling".
+ Y_i = [X_i, X_(i+1*lag), X_(i+2*lag), ... X_(i+(emb_dim-1)*lag)]. Choosing
+ a higher value for emb_dim allows reconstructing higher-dimensional
+ dynamics and avoids "systematic errors due to corrections to scaling".
+ Choosing a higher value for lag helps to avoid overestimating the
+ correlation because X_i ~= X_(i+1), but lag should also not be set too
+ high, as that would underestimate the correlation due to the exponential
+ divergence of trajectories in chaotic systems.
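+
+ A minimal sketch of such a delay embedding (illustration only; the actual
+ implementation uses the helper ``delay_embedding``):
+
+   import numpy as np
+
+   def embed_sketch(x, emb_dim, lag=1):
+     # each row is one embedded vector Y_i
+     m = len(x) - (emb_dim - 1) * lag
+     return np.array([x[i:i + (emb_dim - 1) * lag + 1:lag] for i in range(m)])
+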
References:
.. [cd_1] P. Grassberger and I. Procaccia, “Characterization of strange
@@ -1385,23 +1997,39 @@ Source code for nolds.measures
correlation dimension as slope of the line fitted to log(r) vs log(C(r))
(1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
- ``(rvals, csums, poly)`` where ``rvals`` are the values used for log(r),
- ``csums`` are the corresponding log(C(r)) and ``poly`` are the line
+ ``(rvals, csums, poly)`` where ``rvals`` are the values used for log(r),
+ ``csums`` are the corresponding log(C(r)) and ``poly`` are the line
coefficients (``[slope, intercept]``)
"""
+ # TODO determine lag in units of time instead of number of datapoints
data = np.asarray(data)
# TODO what are good values for r?
# TODO do this for multiple values of emb_dim?
if rvals is None:
- sd = np.std(data)
+ sd = np.std(data, ddof=1)
rvals = logarithmic_r(0.1 * sd, 0.5 * sd, 1.03)
- n = len(data)
- orbit = delay_embedding(data, emb_dim, lag=1)
- dists = np.array([dist(orbit, orbit[i]) for i in range(len(orbit))])
+ orbit = delay_embedding(data, emb_dim, lag=lag)
+ n = len(orbit)
+ dists = np.zeros((len(orbit), len(orbit)), dtype=np.float64)
+ for i in range(len(orbit)):
+ # calculate distances between X_i and X_i+1, X_i+2, ... , X_n-1
+ # NOTE: strictly speaking, [cd_1] does not specify that self-matches should
+ # be excluded. However, since [cd_2] and [cd_3] specify to only compare i
+ # with j != i or j > i respectively, it is safe to assume that this was an
+ # oversight in [cd_1].
+ d = dist(orbit[i+1:], orbit[i])
+ dists[i+1:,i] = d # fill column i
+ dists[i,i+1:] = d # fill row i
csums = []
for r in rvals:
- s = 1.0 / (n * (n - 1)) * np.sum(dists < r)
+ # NOTE: Both [cd_1] and [cd_2] use the factor 1/N^2 here.
+ # However, since we only use these values to fit a line in a log-log plot
+ # any multiplicative constant doesn't change the result since it will
+ # only result in an offset on the y-axis. Also, [cd_3] has a point here
+ # in that if we exclude self-matches in the numerator, it makes sense to
+ # also exclude self-matches from the denominator.
+ s = 1.0 / (n * (n - 1)) * np.sum(dists <= r)
csums.append(s)
csums = np.array(csums)
# filter zeros from csums
@@ -1412,7 +2040,7 @@ Source code for nolds.measures
# all sums are zero => we cannot fit a line
poly = [np.nan, np.nan]
else:
- poly = poly_fit(np.log(rvals), np.log(csums), 1)
+ poly = poly_fit(np.log(rvals), np.log(csums), 1, fit=fit)
if debug_plot:
plot_reg(np.log(rvals), np.log(csums), poly, "log(r)", "log(C(r))",
fname=plot_file)
@@ -1421,16 +2049,31 @@ Source code for nolds.measures
else:
return poly[0]
-[docs]def dfa(data, nvals=None, overlap=True, order=1, fit_trend="poly",
+
+
+def detrend_data(data, order=1, fit="poly"):
+ """
+ Removes a trend of given order from the data.
+ """
+ # TODO also use this function in dfa
+ xvals = np.arange(len(data))
+ trend = poly_fit(xvals, data, order, fit=fit)
+ detrended = data - np.polyval(trend, xvals)
+ return detrended
+
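+# Hedged usage sketch (illustration only): removing a linear trend from a
+# noisy ramp should leave roughly zero-mean residuals.
+#
+#   data = np.arange(100) * 0.5 + np.random.normal(0, 1, 100)
+#   residuals = detrend_data(data, order=1)
+#   # np.mean(residuals) should now be close to 0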
+
+
+[docs]
+def dfa(data, nvals=None, overlap=True, order=1, fit_trend="poly",
fit_exp="RANSAC", debug_plot=False, debug_data=False, plot_file=None):
- """
+ """
Performs a detrended fluctuation analysis (DFA) on the given data
Recommendations for parameter settings by Hardstone et al.:
* nvals should be equally spaced on a logarithmic scale so that each window
scale has the same weight
* min(nvals) < 4 does not make much sense as fitting a polynomial (even if
- it is only of order 1) to 3 or less data points is very prone.
+ it is only of order 1) to 3 or less data points is very prone to errors.
* max(nvals) > len(data) / 10 does not make much sense as we will then have
less than 10 windows to calculate the average fluctuation
* use overlap=True to obtain more windows and therefore better statistics
@@ -1438,51 +2081,74 @@ Source code for nolds.measures
Explanation of DFA:
Detrended fluctuation analysis, much like the Hurst exponent, is used to
- find long-term statistical dependencies in time series.
-
- The idea behind DFA originates from the definition of self-affine
- processes. A process X is said to be self-affine if the standard deviation
- of the values within a window of length n changes with the window length
- factor L in a power law:
-
- std(X,L * n) = L^H * std(X, n)
-
- where std(X, k) is the standard deviation of the process X calculated over
- windows of size k. In this equation, H is called the Hurst parameter, which
- behaves indeed very similar to the Hurst exponent.
-
- Like the Hurst exponent, H can be obtained from a time series by
- calculating std(X,n) for different n and fitting a straight line to the
- plot of log(std(X,n)) versus log(n).
-
- To calculate a single std(X,n), the time series is split into windows of
- equal length n, so that the ith window of this size has the form
-
- W_(n,i) = [x_i, x_(i+1), x_(i+2), ... x_(i+n-1)]
-
- The value std(X,n) is then obtained by calculating std(W_(n,i)) for each i
- and averaging the obtained values over i.
-
- The aforementioned definition of self-affinity, however, assumes that the
- process is non-stationary (i.e. that the standard deviation changes over
- time) and it is highly influenced by local and global trends of the time
- series.
-
- To overcome these problems, an estimate alpha of H is calculated by using a
- "walk" or "signal profile" instead of the raw time series. This walk is
- obtained by substracting the mean and then taking the cumulative sum of the
- original time series. The local trends are removed for each window
- separately by fitting a polynomial p_(n,i) to the window W_(n,i) and then
- calculating W'_(n,i) = W_(n,i) - p_(n,i) (element-wise substraction).
-
- We then calculate std(X,n) as before only using the "detrended" window
- W'_(n,i) instead of W_(n,i). Instead of H we obtain the parameter alpha
- from the line fitting.
-
- For alpha < 1 the underlying process is stationary and can be modelled as
- fractional Gaussian noise with H = alpha. This means for alpha = 0.5 we
- have no correlation or "memory", for 0.5 < alpha < 1 we have a memory with
- positive correlation and for alpha < 0.5 the correlation is negative.
+ find long-term statistical dependencies in time series. However, while the
+ Hurst exponent will indicate long-term correlations for any non-stationary
+ process (i.e. a stochastic process whose probability distribution changes
+ when shifted in time, such as a random walk whose mean changes over time),
+ DFA was designed to distinguish between correlations that are purely an
+ artifact of non-stationarity and those that show inherent long-term
+ behavior of the studied system.
+
+ Mathematically, the long-term correlations that we are interested in can
+ be characterized using the autocorrelation function C(s). For a time series
+ (x_i) with i = 1, ..., N it is defined as follows:
+
+ C(s) = 1/(N-s) * (y_1 * y_(1+s) + y_2 * y_(2+s) + ... + y_(N-s) * y_N)
+
+ with y_i = x_i - mean(x). If there are no correlations at all, C(s) would
+ be zero for s > 0. For short-range correlations, C(s) will decline
+ exponentially, but for long-term correlations the decline follows a power
+ law of the form C(s) ~ s^(-gamma) instead with 0 < gamma < 1.
+
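+ As a rough illustration (not part of the DFA procedure itself), C(s)
+ could be estimated directly from a NumPy array ``x``, assuming len(x) is
+ large enough for the chosen values of s:
+
+   y = x - np.mean(x)
+   C = [np.mean(y[:-s] * y[s:]) for s in range(1, 20)]
+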
+ Due to noise and underlying trends, calculating C(s) directly is usually not
+ feasible. The main idea of DFA is therefore to remove trends up to a given
+ order from the input data and analyze the remaining fluctuations. Trends
+ in this sense are smooth signals with monotonous or slowly oscillating
+ behavior that are caused by external effects and not the dynamical system
+ under study.
+
+ To get a hold of these trends, the first step is to calculate the "profile"
+ of our time series as the cumulative sum of deviations from the mean,
+ effectively integrating our data. This both smoothes out measurement noise
+ and makes it easier to distinguish the fractal properties of bounded time
+ series (i.e. time series whose values cannot grow or shrink beyond certain
+ bounds such as most biological or physical signals) by applying random walk
+ theory (see [dfa_3]_ and [dfa_4]_).
+
+ y_i = x_1 - mean(x) + x_2 - mean(x) + ... + x_i - mean(x).
+
+ After that, we split the profile y_i into (usually non-overlapping) windows of length
+ n to calculate local trends at this given scale. The ith window of this
+ size has the form
+
+ W_(n,i) = [y_i, y_(i+1), y_(i+2), ... y_(i+n-1)]
+
+ The local trends are then removed for each window separately by fitting a
+ polynomial p_(n,i) to the window W_(n,i) and then calculating
+ W'_(n,i) = W_(n,i) - p_(n,i) (element-wise subtraction).
+
+ This leaves us with the deviations from the trend - the "fluctuations" -
+ that we are interested in. To quantify them, we take the root mean square
+ of these fluctuations. It is important to note that we have to sum up all
+ individual fluctuations across all windows and divide by the total number
+ of fluctuations here before finally taking the root as last step. Some
+ implementations apply another root per window, which skews the result.
+
+ The resulting fluctuation F(n) is then only dependent on the window size n,
+ the scale at which we observe our data. It behaves similarly to the
+ autocorrelation function in that it follows a power-law for long-term
+ correlations:
+
+ F(n) ~ n^alpha
+
+ where alpha is the Hurst parameter, which we can obtain by fitting a line
+ to the plot of log(n) versus log(F(n)) and taking the slope.
+
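+ A compressed sketch of these steps (an illustration only; the actual
+ implementation below additionally handles overlapping windows, higher
+ trend orders, and edge cases), assuming a NumPy array ``x`` and window
+ sizes ``nvals``:
+
+   y = np.cumsum(x - np.mean(x))  # profile
+   F = []
+   for n in nvals:
+     windows = [y[i:i+n] for i in range(0, len(y) - n + 1, n)]
+     # mean squared residuals around the local linear trend of each window
+     t = np.arange(n)
+     sq = [np.mean((w - np.polyval(np.polyfit(t, w, 1), t)) ** 2)
+           for w in windows]
+     F.append(np.sqrt(np.mean(sq)))
+   alpha = np.polyfit(np.log(nvals), np.log(F), 1)[0]
+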
+ The result can be interpreted as follows: For alpha < 1 the underlying
+ process is stationary and can be modelled as fractional Gaussian noise with
+ H = alpha. This means for alpha = 0.5 we have no long-term correlation or
+ "memory", for 0.5 < alpha < 1 we have positive long-term correlations and
+ for alpha < 0.5 the long-term correlations are negative.
For alpha > 1 the underlying process is non-stationary and can be modeled
as fractional Brownian motion with H = alpha - 1.
@@ -1491,7 +2157,23 @@ Source code for nolds.measures
.. [dfa_1] C.-K. Peng, S. V. Buldyrev, S. Havlin, M. Simons,
H. E. Stanley, and A. L. Goldberger, “Mosaic organization of
DNA nucleotides,” Physical Review E, vol. 49, no. 2, 1994.
- .. [dfa_2] R. Hardstone, S.-S. Poil, G. Schiavone, R. Jansen,
+ .. [dfa_2] J. W. Kantelhardt, E. Koscielny-Bunde, H. H. A. Rego, S.
+ Havlin, and A. Bunde, “Detecting long-range correlations with
+ detrended fluctuation analysis,” Physica A: Statistical
+ Mechanics and its Applications, vol. 295, no. 3–4, pp. 441–454,
+ Jun. 2001, doi: 10.1016/S0378-4371(01)00144-3.
+ .. [dfa_3] C. Peng, J. M. Hausdorff, and A. L. Goldberger, “Fractal
+ mechanisms in neuronal control: human heartbeat and gait
+ dynamics in health and disease,” in Self-Organized Biological
+ Dynamics and Nonlinear Control, 1st ed., J. Walleczek, Ed.,
+ Cambridge University Press, 2000, pp. 66–96.
+ doi: 10.1017/CBO9780511535338.006.
+ .. [dfa_4] A. Bashan, R. Bartsch, J. W. Kantelhardt, and S. Havlin,
+ “Comparison of detrending methods for fluctuation analysis,”
+ Physica A: Statistical Mechanics and its Applications, vol. 387,
+ no. 21, pp. 5080–5090, Sep. 2008,
+ doi: 10.1016/j.physa.2008.04.023.
+ .. [dfa_5] R. Hardstone, S.-S. Poil, G. Schiavone, R. Jansen,
V. V. Nikulin, H. D. Mansvelder, and K. Linkenkaer-Hansen,
“Detrended fluctuation analysis: A scale-free view on neuronal
oscillations,” Frontiers in Physiology, vol. 30, 2012.
@@ -1555,7 +2237,7 @@ Source code for nolds.measures
nvals = [total_N-2, total_N-1]
msg = "choosing nvals = {} , DFA with less than ten data points is " \
+ "extremely unreliable"
- warnings.warn(msg.format(nvals),RuntimeWarning)
+ warnings.warn(msg.format(nvals), RuntimeWarning)
if len(nvals) < 2:
raise ValueError("at least two nvals are needed")
if np.min(nvals) < 2:
@@ -1582,10 +2264,13 @@ Source code for nolds.measures
for i in range(len(d))]
tpoly = np.array(tpoly)
trend = np.array([np.polyval(tpoly[i], x) for i in range(len(d))])
- # calculate standard deviation ("fluctuation") of walks in d around trend
- flucs = np.sqrt(np.sum((d - trend) ** 2, axis=1) / n)
- # calculate mean fluctuation over all subsequences
- f_n = np.sum(flucs) / len(flucs)
+ # calculate mean-square differences for each walk in d around trend
+ flucs = np.sum((d - trend) ** 2, axis=1) / n
+ # take another mean across all walks and finally take the square root of that
+ # NOTE: To map this to the formula in Peng1995, observe that this simplifies
+ # to np.sqrt(np.sum((d - trend) ** 2) / total_N) if we have non-overlapping
+ # windows and the last window matches the end of the data perfectly.
+ f_n = np.sqrt(np.sum(flucs) / len(flucs))
fluctuations.append(f_n)
fluctuations = np.array(fluctuations)
# filter zeros from fluctuations
@@ -1605,13 +2290,41 @@ Source code for nolds.measures
return (poly[0], (np.log(nvals), np.log(fluctuations), poly))
else:
return poly[0]
+
+
- ©2016-2018, Christopher Schölzel.
+ ©2016-2024, Christopher Schölzel.
|
- Powered by Sphinx 1.6.6
- & Alabaster 0.7.10
+ Powered by Sphinx 8.0.2
+ & Alabaster 1.0.0
diff --git a/_sources/examples.rst.txt b/_sources/examples.rst.txt
index 7ee0fb8..d89c5b7 100644
--- a/_sources/examples.rst.txt
+++ b/_sources/examples.rst.txt
@@ -15,11 +15,15 @@ You can run some examples for the functions in nolds with the command
* ``hurst-hist`` plots a histogram of hurst exponents obtained for random noise.
* ``hurst-nvals`` creates a plot that compares the results of different choices for nvals
for the function ``hurst_rs``.
+* ``sampen-tol`` compares old and new default tolerance values for ``sampen``.
+* ``hurst_mf_stock`` recreates a plot from Di Matteo (2003).
+* ``barabasi_1991_figure2`` and ``barabasi_1991_figure3`` recreate the respective plots from Barabási et al. (1991).
+* ``lorenz`` calculates all main measures of ``nolds`` for the x, y, and z coordinates of the Lorenz system and compares them to prescribed values from the literature.
These tests are also available as functions inside the module ``nolds.examples``.
Functions in ``nolds.examples``
------------------------------
+-------------------------------
.. autofunction:: nolds.examples.plot_lyap
.. autofunction:: nolds.examples.profiling
diff --git a/_sources/nolds.rst.txt b/_sources/nolds.rst.txt
index b74083a..ecbb1da 100644
--- a/_sources/nolds.rst.txt
+++ b/_sources/nolds.rst.txt
@@ -35,6 +35,14 @@ Detrended fluctuation analysis
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: nolds.dfa
+Generalized Hurst Exponent (Barabási et al.)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: nolds.mfhurst_b
+
+Generalized Hurst Exponent (Di Matteo et al.)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: nolds.mfhurst_dm
+
Helper functions
-----------------
.. autofunction:: nolds.binary_n
@@ -87,7 +95,7 @@ Benchmark dataset for hurst exponent
View of Cycles, Prices, and Market Volatility”, Wiley: Hoboken,
2nd Edition, 1996.
.. [b7_b] Ian L. Kaplan, "Estimating the Hurst Exponent",
- url: http://www.bearcave.com/misl/misl_tech/wavelets/hurst/
+ url: http://bearcave.com/misl/misl_tech/wavelets/hurst/index.html
.. [b7_c] HwB, "Pracma: brown72",
url: https://www.rdocumentation.org/packages/pracma/versions/1.9.9/topics/brown72
@@ -111,3 +119,11 @@ Quantum random numbers
~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: nolds.qrandom
.. autofunction:: nolds.load_qrandom
+
+Financial example datasets
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: nolds.load_financial
+
+Fractal data used by Barabasi et al. (1991)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: nolds.barabasi1991_fractal
diff --git a/_static/alabaster.css b/_static/alabaster.css
index 8fd370c..067f2fd 100644
--- a/_static/alabaster.css
+++ b/_static/alabaster.css
@@ -1,61 +1,7 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-@import url("basic.css");
-
/* -- page layout ----------------------------------------------------------- */
body {
- font-family: 'goudy old style', 'minion pro', 'bell mt', Georgia, 'Hiragino Mincho Pro', serif;
+ font-family: Georgia, serif;
font-size: 17px;
background-color: #fff;
color: #000;
@@ -121,6 +67,11 @@ div.relations {
}
+div.sphinxsidebar {
+ max-height: 100%;
+ overflow-y: auto;
+}
+
div.sphinxsidebar a {
color: #444;
text-decoration: none;
@@ -159,7 +110,7 @@ div.sphinxsidebarwrapper p.blurb {
div.sphinxsidebar h3,
div.sphinxsidebar h4 {
- font-family: 'Garamond', 'Georgia', serif;
+ font-family: Georgia, serif;
color: #444;
font-size: 24px;
font-weight: normal;
@@ -203,10 +154,18 @@ div.sphinxsidebar ul li.toctree-l2 > a {
div.sphinxsidebar input {
border: 1px solid #CCC;
- font-family: 'goudy old style', 'minion pro', 'bell mt', Georgia, 'Hiragino Mincho Pro', serif;
+ font-family: Georgia, serif;
font-size: 1em;
}
+div.sphinxsidebar #searchbox {
+ margin: 1em 0;
+}
+
+div.sphinxsidebar .search > div {
+ display: table-cell;
+}
+
div.sphinxsidebar hr {
border: none;
height: 1px;
@@ -218,6 +177,19 @@ div.sphinxsidebar hr {
width: 50%;
}
+div.sphinxsidebar .badge {
+ border-bottom: none;
+}
+
+div.sphinxsidebar .badge:hover {
+ border-bottom: none;
+}
+
+/* To address an issue with donation coming after search */
+div.sphinxsidebar h3.donation {
+ margin-top: 10px;
+}
+
/* -- body styles ----------------------------------------------------------- */
a {
@@ -236,7 +208,7 @@ div.body h3,
div.body h4,
div.body h5,
div.body h6 {
- font-family: 'Garamond', 'Georgia', serif;
+ font-family: Georgia, serif;
font-weight: normal;
margin: 30px 0px 10px 0px;
padding: 0;
@@ -277,7 +249,7 @@ div.admonition tt.xref, div.admonition code.xref, div.admonition a tt {
}
div.admonition p.admonition-title {
- font-family: 'Garamond', 'Georgia', serif;
+ font-family: Georgia, serif;
font-weight: normal;
font-size: 24px;
margin: 0 0 10px 0;
@@ -289,10 +261,6 @@ div.admonition p.last {
margin-bottom: 0;
}
-div.highlight {
- background-color: #fff;
-}
-
dt:target, .highlight {
background: #FAF3E8;
}
@@ -366,7 +334,7 @@ p.admonition-title:after {
}
pre, tt, code {
- font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+ font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
font-size: 0.9em;
}
@@ -458,7 +426,9 @@ table.footnote td {
}
dl {
- margin: 0;
+ margin-left: 0;
+ margin-right: 0;
+ margin-top: 0;
padding: 0;
}
@@ -478,7 +448,7 @@ ul, ol {
}
pre {
- background: #EEE;
+ background: unset;
padding: 7px 30px;
margin: 15px 0px;
line-height: 1.3em;
@@ -509,15 +479,15 @@ a.reference {
border-bottom: 1px dotted #004B6B;
}
+a.reference:hover {
+ border-bottom: 1px solid #6D4100;
+}
+
/* Don't put an underline on images */
a.image-reference, a.image-reference:hover {
border-bottom: none;
}
-a.reference:hover {
- border-bottom: 1px solid #6D4100;
-}
-
a.footnote-reference {
text-decoration: none;
font-size: 0.7em;
@@ -533,68 +503,7 @@ a:hover tt, a:hover code {
background: #EEE;
}
-
-@media screen and (max-width: 870px) {
-
- div.sphinxsidebar {
- display: none;
- }
-
- div.document {
- width: 100%;
-
- }
-
- div.documentwrapper {
- margin-left: 0;
- margin-top: 0;
- margin-right: 0;
- margin-bottom: 0;
- }
-
- div.bodywrapper {
- margin-top: 0;
- margin-right: 0;
- margin-bottom: 0;
- margin-left: 0;
- }
-
- ul {
- margin-left: 0;
- }
-
- li > ul {
- /* Matches the 30px from the "ul, ol" selector above */
- margin-left: 30px;
- }
-
- .document {
- width: auto;
- }
-
- .footer {
- width: auto;
- }
-
- .bodywrapper {
- margin: 0;
- }
-
- .footer {
- width: auto;
- }
-
- .github {
- display: none;
- }
-
-
-
-}
-
-
-
-@media screen and (max-width: 875px) {
+@media screen and (max-width: 1100px) {
body {
margin: 0;
@@ -604,12 +513,16 @@ a:hover tt, a:hover code {
div.documentwrapper {
float: none;
background: #fff;
+ margin-left: 0;
+ margin-top: 0;
+ margin-right: 0;
+ margin-bottom: 0;
}
div.sphinxsidebar {
display: block;
float: none;
- width: 102.5%;
+ width: unset;
margin: 50px -30px -20px -30px;
padding: 10px 20px;
background: #333;
@@ -644,8 +557,14 @@ a:hover tt, a:hover code {
div.body {
min-height: 0;
+ min-width: auto; /* fixes width on small screens, breaks .hll */
padding: 0;
}
+
+ .hll {
+ /* "fixes" the breakage */
+ width: max-content;
+ }
.rtd_doc_footer {
display: none;
@@ -659,13 +578,18 @@ a:hover tt, a:hover code {
width: auto;
}
- .footer {
- width: auto;
- }
-
.github {
display: none;
}
+
+ ul {
+ margin-left: 0;
+ }
+
+ li > ul {
+ /* Matches the 30px from the "ul, ol" selector above */
+ margin-left: 30px;
+ }
}
@@ -675,19 +599,65 @@ a:hover tt, a:hover code {
display: none!important;
}
-/* Make nested-list/multi-paragraph items look better in Releases changelog
- * pages. Without this, docutils' magical list fuckery causes inconsistent
- * formatting between different release sub-lists.
- */
-div#changelog > div.section > ul > li > p:only-child {
- margin-bottom: 0;
-}
-
-/* Hide fugly table cell borders in ..bibliography:: directive output */
+/* Hide ugly table cell borders in ..bibliography:: directive output */
table.docutils.citation, table.docutils.citation td, table.docutils.citation th {
border: none;
/* Below needed in some edge cases; if not applied, bottom shadows appear */
-moz-box-shadow: none;
-webkit-box-shadow: none;
box-shadow: none;
+}
+
+
+/* relbar */
+
+.related {
+ line-height: 30px;
+ width: 100%;
+ font-size: 0.9rem;
+}
+
+.related.top {
+ border-bottom: 1px solid #EEE;
+ margin-bottom: 20px;
+}
+
+.related.bottom {
+ border-top: 1px solid #EEE;
+}
+
+.related ul {
+ padding: 0;
+ margin: 0;
+ list-style: none;
+}
+
+.related li {
+ display: inline;
+}
+
+nav#rellinks {
+ float: right;
+}
+
+nav#rellinks li+li:before {
+ content: "|";
+}
+
+nav#breadcrumbs li+li:before {
+ content: "\00BB";
+}
+
+/* Hide certain items when printing */
+@media print {
+ div.related {
+ display: none;
+ }
+}
+
+img.github {
+ position: absolute;
+ top: 0;
+ border: 0;
+ right: 0;
}
\ No newline at end of file
diff --git a/_static/basic.css b/_static/basic.css
index 6f40830..e5179b7 100644
--- a/_static/basic.css
+++ b/_static/basic.css
@@ -4,7 +4,7 @@
*
* Sphinx stylesheet -- basic theme.
*
- * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -15,6 +15,12 @@ div.clearer {
clear: both;
}
+div.section::after {
+ display: block;
+ content: '';
+ clear: left;
+}
+
/* -- relbar ---------------------------------------------------------------- */
div.related {
@@ -81,10 +87,26 @@ div.sphinxsidebar input {
font-size: 1em;
}
+div.sphinxsidebar #searchbox form.search {
+ overflow: hidden;
+}
+
div.sphinxsidebar #searchbox input[type="text"] {
- width: 170px;
+ float: left;
+ width: 80%;
+ padding: 0.25em;
+ box-sizing: border-box;
}
+div.sphinxsidebar #searchbox input[type="submit"] {
+ float: left;
+ width: 20%;
+ border-left: none;
+ padding: 0.25em;
+ box-sizing: border-box;
+}
+
+
img {
border: 0;
max-width: 100%;
@@ -108,7 +130,7 @@ ul.search li a {
font-weight: bold;
}
-ul.search li div.context {
+ul.search li p.context {
color: #888;
margin: 2px 0 0 30px;
text-align: left;
@@ -199,6 +221,11 @@ table.modindextable td {
/* -- general body styles --------------------------------------------------- */
+div.body {
+ min-width: inherit;
+ max-width: 800px;
+}
+
div.body p, div.body dd, div.body li, div.body blockquote {
-moz-hyphens: auto;
-ms-hyphens: auto;
@@ -210,6 +237,10 @@ a.headerlink {
visibility: hidden;
}
+a:visited {
+ color: #551A8B;
+}
+
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
@@ -240,19 +271,25 @@ p.rubric {
font-weight: bold;
}
-img.align-left, .figure.align-left, object.align-left {
+img.align-left, figure.align-left, .figure.align-left, object.align-left {
clear: left;
float: left;
margin-right: 1em;
}
-img.align-right, .figure.align-right, object.align-right {
+img.align-right, figure.align-right, .figure.align-right, object.align-right {
clear: right;
float: right;
margin-left: 1em;
}
-img.align-center, .figure.align-center, object.align-center {
+img.align-center, figure.align-center, .figure.align-center, object.align-center {
+ display: block;
+ margin-left: auto;
+ margin-right: auto;
+}
+
+img.align-default, figure.align-default, .figure.align-default {
display: block;
margin-left: auto;
margin-right: auto;
@@ -266,30 +303,45 @@ img.align-center, .figure.align-center, object.align-center {
text-align: center;
}
+.align-default {
+ text-align: center;
+}
+
.align-right {
text-align: right;
}
/* -- sidebars -------------------------------------------------------------- */
-div.sidebar {
+div.sidebar,
+aside.sidebar {
margin: 0 0 0.5em 1em;
border: 1px solid #ddb;
- padding: 7px 7px 0 7px;
+ padding: 7px;
background-color: #ffe;
width: 40%;
float: right;
+ clear: right;
+ overflow-x: auto;
}
p.sidebar-title {
font-weight: bold;
}
+nav.contents,
+aside.topic,
+div.admonition, div.topic, blockquote {
+ clear: left;
+}
+
/* -- topics ---------------------------------------------------------------- */
+nav.contents,
+aside.topic,
div.topic {
border: 1px solid #ccc;
- padding: 7px 7px 0 7px;
+ padding: 7px;
margin: 10px 0 10px 0;
}
@@ -311,10 +363,6 @@ div.admonition dt {
font-weight: bold;
}
-div.admonition dl {
- margin-bottom: 0;
-}
-
p.admonition-title {
margin: 0px 10px 5px 0px;
font-weight: bold;
@@ -325,13 +373,48 @@ div.body p.centered {
margin-top: 25px;
}
+/* -- content of sidebars/topics/admonitions -------------------------------- */
+
+div.sidebar > :last-child,
+aside.sidebar > :last-child,
+nav.contents > :last-child,
+aside.topic > :last-child,
+div.topic > :last-child,
+div.admonition > :last-child {
+ margin-bottom: 0;
+}
+
+div.sidebar::after,
+aside.sidebar::after,
+nav.contents::after,
+aside.topic::after,
+div.topic::after,
+div.admonition::after,
+blockquote::after {
+ display: block;
+ content: '';
+ clear: both;
+}
+
/* -- tables ---------------------------------------------------------------- */
table.docutils {
+ margin-top: 10px;
+ margin-bottom: 10px;
border: 0;
border-collapse: collapse;
}
+table.align-center {
+ margin-left: auto;
+ margin-right: auto;
+}
+
+table.align-default {
+ margin-left: auto;
+ margin-right: auto;
+}
+
table caption span.caption-number {
font-style: italic;
}
@@ -347,10 +430,6 @@ table.docutils td, table.docutils th {
border-bottom: 1px solid #aaa;
}
-table.footnote td, table.footnote th {
- border: 0 !important;
-}
-
th {
text-align: left;
padding-right: 5px;
@@ -365,22 +444,34 @@ table.citation td {
border-bottom: none;
}
+th > :first-child,
+td > :first-child {
+ margin-top: 0px;
+}
+
+th > :last-child,
+td > :last-child {
+ margin-bottom: 0px;
+}
+
/* -- figures --------------------------------------------------------------- */
-div.figure {
+div.figure, figure {
margin: 0.5em;
padding: 0.5em;
}
-div.figure p.caption {
+div.figure p.caption, figcaption {
padding: 0.3em;
}
-div.figure p.caption span.caption-number {
+div.figure p.caption span.caption-number,
+figcaption span.caption-number {
font-style: italic;
}
-div.figure p.caption span.caption-text {
+div.figure p.caption span.caption-text,
+figcaption span.caption-text {
}
/* -- field list styles ----------------------------------------------------- */
@@ -405,6 +496,74 @@ table.field-list td, table.field-list th {
hyphens: manual;
}
+/* -- hlist styles ---------------------------------------------------------- */
+
+table.hlist {
+ margin: 1em 0;
+}
+
+table.hlist td {
+ vertical-align: top;
+}
+
+/* -- object description styles --------------------------------------------- */
+
+.sig {
+ font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+}
+
+.sig-name, code.descname {
+ background-color: transparent;
+ font-weight: bold;
+}
+
+.sig-name {
+ font-size: 1.1em;
+}
+
+code.descname {
+ font-size: 1.2em;
+}
+
+.sig-prename, code.descclassname {
+ background-color: transparent;
+}
+
+.optional {
+ font-size: 1.3em;
+}
+
+.sig-paren {
+ font-size: larger;
+}
+
+.sig-param.n {
+ font-style: italic;
+}
+
+/* C++ specific styling */
+
+.sig-inline.c-texpr,
+.sig-inline.cpp-texpr {
+ font-family: unset;
+}
+
+.sig.c .k, .sig.c .kt,
+.sig.cpp .k, .sig.cpp .kt {
+ color: #0033B3;
+}
+
+.sig.c .m,
+.sig.cpp .m {
+ color: #1750EB;
+}
+
+.sig.c .s, .sig.c .sc,
+.sig.cpp .s, .sig.cpp .sc {
+ color: #067D17;
+}
+
+
/* -- other body styles ----------------------------------------------------- */
ol.arabic {
@@ -427,11 +586,81 @@ ol.upperroman {
list-style: upper-roman;
}
+:not(li) > ol > li:first-child > :first-child,
+:not(li) > ul > li:first-child > :first-child {
+ margin-top: 0px;
+}
+
+:not(li) > ol > li:last-child > :last-child,
+:not(li) > ul > li:last-child > :last-child {
+ margin-bottom: 0px;
+}
+
+ol.simple ol p,
+ol.simple ul p,
+ul.simple ol p,
+ul.simple ul p {
+ margin-top: 0;
+}
+
+ol.simple > li:not(:first-child) > p,
+ul.simple > li:not(:first-child) > p {
+ margin-top: 0;
+}
+
+ol.simple p,
+ul.simple p {
+ margin-bottom: 0;
+}
+
+aside.footnote > span,
+div.citation > span {
+ float: left;
+}
+aside.footnote > span:last-of-type,
+div.citation > span:last-of-type {
+ padding-right: 0.5em;
+}
+aside.footnote > p {
+ margin-left: 2em;
+}
+div.citation > p {
+ margin-left: 4em;
+}
+aside.footnote > p:last-of-type,
+div.citation > p:last-of-type {
+ margin-bottom: 0em;
+}
+aside.footnote > p:last-of-type:after,
+div.citation > p:last-of-type:after {
+ content: "";
+ clear: both;
+}
+
+dl.field-list {
+ display: grid;
+ grid-template-columns: fit-content(30%) auto;
+}
+
+dl.field-list > dt {
+ font-weight: bold;
+ word-break: break-word;
+ padding-left: 0.5em;
+ padding-right: 5px;
+}
+
+dl.field-list > dd {
+ padding-left: 0.5em;
+ margin-top: 0em;
+ margin-left: 0em;
+ margin-bottom: 0em;
+}
+
dl {
margin-bottom: 15px;
}
-dd p {
+dd > :first-child {
margin-top: 0px;
}
@@ -445,6 +674,21 @@ dd {
margin-left: 30px;
}
+.sig dd {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
+.sig dl {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
+dl > dd:last-child,
+dl > dd:last-child > :last-child {
+ margin-bottom: 0;
+}
+
dt:target, span.highlighted {
background-color: #fbe54e;
}
@@ -458,14 +702,6 @@ dl.glossary dt {
font-size: 1.1em;
}
-.optional {
- font-size: 1.3em;
-}
-
-.sig-paren {
- font-size: larger;
-}
-
.versionmodified {
font-style: italic;
}
@@ -504,11 +740,26 @@ dl.glossary dt {
font-style: oblique;
}
+.classifier:before {
+ font-style: normal;
+ margin: 0 0.5em;
+ content: ":";
+ display: inline-block;
+}
+
abbr, acronym {
border-bottom: dotted 1px;
cursor: help;
}
+.translated {
+ background-color: rgba(207, 255, 207, 0.2)
+}
+
+.untranslated {
+ background-color: rgba(255, 207, 207, 0.2)
+}
+
/* -- code displays --------------------------------------------------------- */
pre {
@@ -516,29 +767,69 @@ pre {
overflow-y: hidden; /* fixes display issues on Chrome browsers */
}
+pre, div[class*="highlight-"] {
+ clear: both;
+}
+
span.pre {
-moz-hyphens: none;
-ms-hyphens: none;
-webkit-hyphens: none;
hyphens: none;
+ white-space: nowrap;
+}
+
+div[class*="highlight-"] {
+ margin: 1em 0;
}
td.linenos pre {
- padding: 5px 0px;
border: 0;
background-color: transparent;
color: #aaa;
}
table.highlighttable {
- margin-left: 0.5em;
+ display: block;
+}
+
+table.highlighttable tbody {
+ display: block;
+}
+
+table.highlighttable tr {
+ display: flex;
}
table.highlighttable td {
- padding: 0 0.5em 0 0.5em;
+ margin: 0;
+ padding: 0;
+}
+
+table.highlighttable td.linenos {
+ padding-right: 0.5em;
+}
+
+table.highlighttable td.code {
+ flex: 1;
+ overflow: hidden;
+}
+
+.highlight .hll {
+ display: block;
+}
+
+div.highlight pre,
+table.highlighttable pre {
+ margin: 0;
+}
+
+div.code-block-caption + div {
+ margin-top: 0;
}
div.code-block-caption {
+ margin-top: 1em;
padding: 2px 5px;
font-size: small;
}
@@ -547,8 +838,14 @@ div.code-block-caption code {
background-color: transparent;
}
-div.code-block-caption + div > div.highlight > pre {
- margin-top: 0;
+table.highlighttable td.linenos,
+span.linenos,
+div.highlight span.gp { /* gp: Generic.Prompt */
+ user-select: none;
+ -webkit-user-select: text; /* Safari fallback only */
+ -webkit-user-select: none; /* Chrome/Safari */
+ -moz-user-select: none; /* Firefox */
+ -ms-user-select: none; /* IE10+ */
}
div.code-block-caption span.caption-number {
@@ -560,21 +857,7 @@ div.code-block-caption span.caption-text {
}
div.literal-block-wrapper {
- padding: 1em 1em 0;
-}
-
-div.literal-block-wrapper div.highlight {
- margin: 0;
-}
-
-code.descname {
- background-color: transparent;
- font-weight: bold;
- font-size: 1.2em;
-}
-
-code.descclassname {
- background-color: transparent;
+ margin: 1em 0;
}
code.xref, a code {
@@ -615,8 +898,7 @@ span.eqno {
}
span.eqno a.headerlink {
- position: relative;
- left: 0px;
+ position: absolute;
z-index: 1;
}
diff --git a/_static/doctools.js b/_static/doctools.js
index 0c15c00..4d67807 100644
--- a/_static/doctools.js
+++ b/_static/doctools.js
@@ -2,310 +2,155 @@
* doctools.js
* ~~~~~~~~~~~
*
- * Sphinx JavaScript utilities for all documentation.
+ * Base JavaScript utilities for all Sphinx HTML documentation.
*
- * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
-
-/**
- * select a different prefix for underscore
- */
-$u = _.noConflict();
-
-/**
- * make the code below compatible with browsers without
- * an installed firebug like debugger
-if (!window.console || !console.firebug) {
- var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
- "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
- "profile", "profileEnd"];
- window.console = {};
- for (var i = 0; i < names.length; ++i)
- window.console[names[i]] = function() {};
-}
- */
-
-/**
- * small helper function to urldecode strings
- */
-jQuery.urldecode = function(x) {
- return decodeURIComponent(x).replace(/\+/g, ' ');
-};
-
-/**
- * small helper function to urlencode strings
- */
-jQuery.urlencode = encodeURIComponent;
-
-/**
- * This function returns the parsed url parameters of the
- * current request. Multiple values per key are supported,
- * it will always return arrays of strings for the value parts.
- */
-jQuery.getQueryParameters = function(s) {
- if (typeof s === 'undefined')
- s = document.location.search;
- var parts = s.substr(s.indexOf('?') + 1).split('&');
- var result = {};
- for (var i = 0; i < parts.length; i++) {
- var tmp = parts[i].split('=', 2);
- var key = jQuery.urldecode(tmp[0]);
- var value = jQuery.urldecode(tmp[1]);
- if (key in result)
- result[key].push(value);
- else
- result[key] = [value];
+"use strict";
+
+const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
+ "TEXTAREA",
+ "INPUT",
+ "SELECT",
+ "BUTTON",
+]);
+
+const _ready = (callback) => {
+ if (document.readyState !== "loading") {
+ callback();
+ } else {
+ document.addEventListener("DOMContentLoaded", callback);
}
- return result;
};
-/**
- * highlight a given string on a jquery object by wrapping it in
- * span elements with the given class name.
- */
-jQuery.fn.highlightText = function(text, className) {
- function highlight(node, addItems) {
- if (node.nodeType === 3) {
- var val = node.nodeValue;
- var pos = val.toLowerCase().indexOf(text);
- if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
- var span;
- var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
- if (isInSVG) {
- span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
- } else {
- span = document.createElement("span");
- span.className = className;
- }
- span.appendChild(document.createTextNode(val.substr(pos, text.length)));
- node.parentNode.insertBefore(span, node.parentNode.insertBefore(
- document.createTextNode(val.substr(pos + text.length)),
- node.nextSibling));
- node.nodeValue = val.substr(0, pos);
- if (isInSVG) {
- var bbox = span.getBBox();
- var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
- rect.x.baseVal.value = bbox.x;
- rect.y.baseVal.value = bbox.y;
- rect.width.baseVal.value = bbox.width;
- rect.height.baseVal.value = bbox.height;
- rect.setAttribute('class', className);
- var parentOfText = node.parentNode.parentNode;
- addItems.push({
- "parent": node.parentNode,
- "target": rect});
- }
- }
- }
- else if (!jQuery(node).is("button, select, textarea")) {
- jQuery.each(node.childNodes, function() {
- highlight(this, addItems);
- });
- }
- }
- var addItems = [];
- var result = this.each(function() {
- highlight(this, addItems);
- });
- for (var i = 0; i < addItems.length; ++i) {
- jQuery(addItems[i].parent).before(addItems[i].target);
- }
- return result;
-};
-
-/*
- * backward compatibility for jQuery.browser
- * This will be supported until firefox bug is fixed.
- */
-if (!jQuery.browser) {
- jQuery.uaMatch = function(ua) {
- ua = ua.toLowerCase();
-
- var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
- /(webkit)[ \/]([\w.]+)/.exec(ua) ||
- /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
- /(msie) ([\w.]+)/.exec(ua) ||
- ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
- [];
-
- return {
- browser: match[ 1 ] || "",
- version: match[ 2 ] || "0"
- };
- };
- jQuery.browser = {};
- jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
-}
-
/**
* Small JavaScript module for the documentation.
*/
-var Documentation = {
-
- init : function() {
- this.fixFirefoxAnchorBug();
- this.highlightSearchWords();
- this.initIndexTable();
-
+const Documentation = {
+ init: () => {
+ Documentation.initDomainIndexTable();
+ Documentation.initOnKeyListeners();
},
/**
* i18n support
*/
- TRANSLATIONS : {},
- PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
- LOCALE : 'unknown',
+ TRANSLATIONS: {},
+ PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
+ LOCALE: "unknown",
  // gettext and ngettext don't access this so that the functions
  // can safely be bound to a different name (_ = Documentation.gettext)
- gettext : function(string) {
- var translated = Documentation.TRANSLATIONS[string];
- if (typeof translated === 'undefined')
- return string;
- return (typeof translated === 'string') ? translated : translated[0];
- },
-
- ngettext : function(singular, plural, n) {
- var translated = Documentation.TRANSLATIONS[singular];
- if (typeof translated === 'undefined')
- return (n == 1) ? singular : plural;
- return translated[Documentation.PLURALEXPR(n)];
+ gettext: (string) => {
+ const translated = Documentation.TRANSLATIONS[string];
+ switch (typeof translated) {
+ case "undefined":
+ return string; // no translation
+ case "string":
+ return translated; // translation exists
+ default:
+ return translated[0]; // (singular, plural) translation tuple exists
+ }
},
- addTranslations : function(catalog) {
- for (var key in catalog.messages)
- this.TRANSLATIONS[key] = catalog.messages[key];
- this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
- this.LOCALE = catalog.locale;
+ ngettext: (singular, plural, n) => {
+ const translated = Documentation.TRANSLATIONS[singular];
+ if (typeof translated !== "undefined")
+ return translated[Documentation.PLURAL_EXPR(n)];
+ return n === 1 ? singular : plural;
},
- /**
- * add context elements like header anchor links
- */
- addContextElements : function() {
- $('div[id] > :header:first').each(function() {
-      $('<a class="headerlink">\u00B6</a>').
- attr('href', '#' + this.id).
- attr('title', _('Permalink to this headline')).
- appendTo(this);
- });
- $('dt[id]').each(function() {
-      $('<a class="headerlink">\u00B6</a>').
- attr('href', '#' + this.id).
- attr('title', _('Permalink to this definition')).
- appendTo(this);
- });
+ addTranslations: (catalog) => {
+ Object.assign(Documentation.TRANSLATIONS, catalog.messages);
+ Documentation.PLURAL_EXPR = new Function(
+ "n",
+ `return (${catalog.plural_expr})`
+ );
+ Documentation.LOCALE = catalog.locale;
},
/**
- * workaround a firefox stupidity
- * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
+ * helper function to focus on search bar
*/
- fixFirefoxAnchorBug : function() {
- if (document.location.hash && $.browser.mozilla)
- window.setTimeout(function() {
- document.location.href += '';
- }, 10);
+ focusSearchBar: () => {
+ document.querySelectorAll("input[name=q]")[0]?.focus();
},
/**
- * highlight the search words provided in the url in the text
+ * Initialise the domain index toggle buttons
*/
- highlightSearchWords : function() {
- var params = $.getQueryParameters();
- var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
- if (terms.length) {
- var body = $('div.body');
- if (!body.length) {
- body = $('body');
+ initDomainIndexTable: () => {
+ const toggler = (el) => {
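+      // Swap the toggler icon between plus.png and minus.png and show or
+      // hide the rows of the matching collapsible group (tr.cg-<id>).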
+ const idNumber = el.id.substr(7);
+ const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
+ if (el.src.substr(-9) === "minus.png") {
+ el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
+ toggledRows.forEach((el) => (el.style.display = "none"));
+ } else {
+ el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
+ toggledRows.forEach((el) => (el.style.display = ""));
}
- window.setTimeout(function() {
- $.each(terms, function() {
- body.highlightText(this.toLowerCase(), 'highlighted');
- });
- }, 10);
-      $('<p class="highlight-link"><a href="javascript:Documentation.' +
-        'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
-        .appendTo($('#searchbox'));
- }
- },
-
- /**
- * init the domain index toggle buttons
- */
- initIndexTable : function() {
- var togglers = $('img.toggler').click(function() {
- var src = $(this).attr('src');
- var idnum = $(this).attr('id').substr(7);
- $('tr.cg-' + idnum).toggle();
- if (src.substr(-9) === 'minus.png')
- $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
- else
- $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
- }).css('display', '');
- if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
- togglers.click();
- }
- },
-
- /**
- * helper function to hide the search marks again
- */
- hideSearchWords : function() {
- $('#searchbox .highlight-link').fadeOut(300);
- $('span.highlighted').removeClass('highlighted');
- },
-
- /**
- * make the url absolute
- */
- makeURL : function(relativeURL) {
- return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
- },
+ };
- /**
- * get the current relative url
- */
- getCurrentURL : function() {
- var path = document.location.pathname;
- var parts = path.split(/\//);
- $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
- if (this === '..')
- parts.pop();
- });
- var url = parts.join('/');
- return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
+ const togglerElements = document.querySelectorAll("img.toggler");
+ togglerElements.forEach((el) =>
+ el.addEventListener("click", (event) => toggler(event.currentTarget))
+ );
+ togglerElements.forEach((el) => (el.style.display = ""));
+ if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
},
- initOnKeyListeners: function() {
- $(document).keyup(function(event) {
- var activeElementType = document.activeElement.tagName;
- // don't navigate when in search box or textarea
- if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') {
- switch (event.keyCode) {
- case 37: // left
- var prevHref = $('link[rel="prev"]').prop('href');
- if (prevHref) {
- window.location.href = prevHref;
- return false;
+ initOnKeyListeners: () => {
+ // only install a listener if it is really needed
+ if (
+ !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
+ !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
+ )
+ return;
+
+ document.addEventListener("keydown", (event) => {
+ // bail for input elements
+ if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+ // bail with special keys
+ if (event.altKey || event.ctrlKey || event.metaKey) return;
+
+ if (!event.shiftKey) {
+ switch (event.key) {
+ case "ArrowLeft":
+ if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
+
+ const prevLink = document.querySelector('link[rel="prev"]');
+ if (prevLink && prevLink.href) {
+ window.location.href = prevLink.href;
+ event.preventDefault();
}
- case 39: // right
- var nextHref = $('link[rel="next"]').prop('href');
- if (nextHref) {
- window.location.href = nextHref;
- return false;
+ break;
+ case "ArrowRight":
+ if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
+
+ const nextLink = document.querySelector('link[rel="next"]');
+ if (nextLink && nextLink.href) {
+ window.location.href = nextLink.href;
+ event.preventDefault();
}
+ break;
}
}
+
+ // some keyboard layouts may need Shift to get /
+ switch (event.key) {
+ case "/":
+ if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
+ Documentation.focusSearchBar();
+ event.preventDefault();
+ }
});
- }
+ },
};
// quick alias for translations
-_ = Documentation.gettext;
+const _ = Documentation.gettext;
-$(document).ready(function() {
- Documentation.init();
-});
\ No newline at end of file
+_ready(Documentation.init);
diff --git a/_static/documentation_options.js b/_static/documentation_options.js
new file mode 100644
index 0000000..3fc4608
--- /dev/null
+++ b/_static/documentation_options.js
@@ -0,0 +1,13 @@
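+// Values baked in at build time and read by the other static scripts
+// (doctools.js, searchtools.js, sphinx_highlight.js).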
+const DOCUMENTATION_OPTIONS = {
+ VERSION: '0.6.0',
+ LANGUAGE: 'en',
+ COLLAPSE_INDEX: false,
+ BUILDER: 'html',
+ FILE_SUFFIX: '.html',
+ LINK_SUFFIX: '.html',
+ HAS_SOURCE: true,
+ SOURCELINK_SUFFIX: '.txt',
+ NAVIGATION_WITH_KEYS: false,
+ SHOW_SEARCH_SUMMARY: true,
+ ENABLE_SEARCH_SHORTCUTS: true,
+};
\ No newline at end of file
diff --git a/_static/github-banner.svg b/_static/github-banner.svg
new file mode 100644
index 0000000..c47d9dc
--- /dev/null
+++ b/_static/github-banner.svg
@@ -0,0 +1,5 @@
diff --git a/_static/language_data.js b/_static/language_data.js
new file mode 100644
index 0000000..367b8ed
--- /dev/null
+++ b/_static/language_data.js
@@ -0,0 +1,199 @@
+/*
+ * language_data.js
+ * ~~~~~~~~~~~~~~~~
+ *
+ * This script contains the language-specific data used by searchtools.js,
+ * namely the list of stopwords, stemmer, scorer and splitter.
+ *
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
+
+
+/* Non-minified version is copied as a separate JS file, if available */
+
+/**
+ * Porter Stemmer
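+ *
+ * Reduces English words to a common stem ("connected", "connecting", and
+ * "connections" all become "connect") so a query matches word variants.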
+ */
+var Stemmer = function() {
+
+ var step2list = {
+ ational: 'ate',
+ tional: 'tion',
+ enci: 'ence',
+ anci: 'ance',
+ izer: 'ize',
+ bli: 'ble',
+ alli: 'al',
+ entli: 'ent',
+ eli: 'e',
+ ousli: 'ous',
+ ization: 'ize',
+ ation: 'ate',
+ ator: 'ate',
+ alism: 'al',
+ iveness: 'ive',
+ fulness: 'ful',
+ ousness: 'ous',
+ aliti: 'al',
+ iviti: 'ive',
+ biliti: 'ble',
+ logi: 'log'
+ };
+
+ var step3list = {
+ icate: 'ic',
+ ative: '',
+ alize: 'al',
+ iciti: 'ic',
+ ical: 'ic',
+ ful: '',
+ ness: ''
+ };
+
+ var c = "[^aeiou]"; // consonant
+ var v = "[aeiouy]"; // vowel
+ var C = c + "[^aeiouy]*"; // consonant sequence
+ var V = v + "[aeiou]*"; // vowel sequence
+
+ var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
+ var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
+ var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
+ var s_v = "^(" + C + ")?" + v; // vowel in stem
+
+ this.stemWord = function (w) {
+ var stem;
+ var suffix;
+ var firstch;
+ var origword = w;
+
+ if (w.length < 3)
+ return w;
+
+ var re;
+ var re2;
+ var re3;
+ var re4;
+
+ firstch = w.substr(0,1);
+ if (firstch == "y")
+ w = firstch.toUpperCase() + w.substr(1);
+
+ // Step 1a
+ re = /^(.+?)(ss|i)es$/;
+ re2 = /^(.+?)([^s])s$/;
+
+ if (re.test(w))
+ w = w.replace(re,"$1$2");
+ else if (re2.test(w))
+ w = w.replace(re2,"$1$2");
+
+ // Step 1b
+ re = /^(.+?)eed$/;
+ re2 = /^(.+?)(ed|ing)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ re = new RegExp(mgr0);
+ if (re.test(fp[1])) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1];
+ re2 = new RegExp(s_v);
+ if (re2.test(stem)) {
+ w = stem;
+ re2 = /(at|bl|iz)$/;
+ re3 = new RegExp("([^aeiouylsz])\\1$");
+ re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re2.test(w))
+ w = w + "e";
+ else if (re3.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ else if (re4.test(w))
+ w = w + "e";
+ }
+ }
+
+ // Step 1c
+ re = /^(.+?)y$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(s_v);
+ if (re.test(stem))
+ w = stem + "i";
+ }
+
+ // Step 2
+ re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step2list[suffix];
+ }
+
+ // Step 3
+ re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step3list[suffix];
+ }
+
+ // Step 4
+ re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
+ re2 = /^(.+?)(s|t)(ion)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ if (re.test(stem))
+ w = stem;
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1] + fp[2];
+ re2 = new RegExp(mgr1);
+ if (re2.test(stem))
+ w = stem;
+ }
+
+ // Step 5
+ re = /^(.+?)e$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ re2 = new RegExp(meq1);
+ re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
+ w = stem;
+ }
+ re = /ll$/;
+ re2 = new RegExp(mgr1);
+ if (re.test(w) && re2.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+
+ // and turn initial Y back to y
+ if (firstch == "y")
+ w = firstch.toLowerCase() + w.substr(1);
+ return w;
+ }
+}
+
diff --git a/_static/pygments.css b/_static/pygments.css
index 20c4814..0d49244 100644
--- a/_static/pygments.css
+++ b/_static/pygments.css
@@ -1,5 +1,10 @@
+pre { line-height: 125%; }
+td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
.highlight .hll { background-color: #ffffcc }
-.highlight { background: #eeffcc; }
+.highlight { background: #eeffcc; }
.highlight .c { color: #408090; font-style: italic } /* Comment */
.highlight .err { border: 1px solid #FF0000 } /* Error */
.highlight .k { color: #007020; font-weight: bold } /* Keyword */
@@ -12,6 +17,7 @@
.highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
.highlight .gd { color: #A00000 } /* Generic.Deleted */
.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */
.highlight .gr { color: #FF0000 } /* Generic.Error */
.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
.highlight .gi { color: #00A000 } /* Generic.Inserted */
diff --git a/_static/searchtools.js b/_static/searchtools.js
index 41b8336..b08d58c 100644
--- a/_static/searchtools.js
+++ b/_static/searchtools.js
@@ -1,730 +1,591 @@
/*
- * searchtools.js_t
+ * searchtools.js
* ~~~~~~~~~~~~~~~~
*
* Sphinx JavaScript utilities for the full-text search.
*
- * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
+"use strict";
-
-/* Non-minified version JS is _stemmer.js if file is provided */
/**
- * Porter Stemmer
+ * Simple result scoring code.
*/
-var Stemmer = function() {
-
- var step2list = {
- ational: 'ate',
- tional: 'tion',
- enci: 'ence',
- anci: 'ance',
- izer: 'ize',
- bli: 'ble',
- alli: 'al',
- entli: 'ent',
- eli: 'e',
- ousli: 'ous',
- ization: 'ize',
- ation: 'ate',
- ator: 'ate',
- alism: 'al',
- iveness: 'ive',
- fulness: 'ful',
- ousness: 'ous',
- aliti: 'al',
- iviti: 'ive',
- biliti: 'ble',
- logi: 'log'
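+// The typeof guard lets a theme or extension ship its own Scorer object
+// before this script runs; the defaults below are used otherwise.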
+if (typeof Scorer === "undefined") {
+ var Scorer = {
+ // Implement the following function to further tweak the score for each result
+ // The function takes a result array [docname, title, anchor, descr, score, filename]
+ // and returns the new score.
+ /*
+ score: result => {
+ const [docname, title, anchor, descr, score, filename] = result
+ return score
+ },
+ */
+
+ // query matches the full name of an object
+ objNameMatch: 11,
+ // or matches in the last dotted part of the object name
+ objPartialMatch: 6,
+ // Additive scores depending on the priority of the object
+ objPrio: {
+ 0: 15, // used to be importantResults
+ 1: 5, // used to be objectResults
+ 2: -5, // used to be unimportantResults
+ },
+ // Used when the priority is not in the mapping.
+ objPrioDefault: 0,
+
+ // query found in title
+ title: 15,
+ partialTitle: 7,
+ // query found in terms
+ term: 5,
+ partialTerm: 2,
};
-
- var step3list = {
- icate: 'ic',
- ative: '',
- alize: 'al',
- iciti: 'ic',
- ical: 'ic',
- ful: '',
- ness: ''
- };
-
- var c = "[^aeiou]"; // consonant
- var v = "[aeiouy]"; // vowel
- var C = c + "[^aeiouy]*"; // consonant sequence
- var V = v + "[aeiou]*"; // vowel sequence
-
- var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
- var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
- var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
- var s_v = "^(" + C + ")?" + v; // vowel in stem
-
- this.stemWord = function (w) {
- var stem;
- var suffix;
- var firstch;
- var origword = w;
-
- if (w.length < 3)
- return w;
-
- var re;
- var re2;
- var re3;
- var re4;
-
- firstch = w.substr(0,1);
- if (firstch == "y")
- w = firstch.toUpperCase() + w.substr(1);
-
- // Step 1a
- re = /^(.+?)(ss|i)es$/;
- re2 = /^(.+?)([^s])s$/;
-
- if (re.test(w))
- w = w.replace(re,"$1$2");
- else if (re2.test(w))
- w = w.replace(re2,"$1$2");
-
- // Step 1b
- re = /^(.+?)eed$/;
- re2 = /^(.+?)(ed|ing)$/;
- if (re.test(w)) {
- var fp = re.exec(w);
- re = new RegExp(mgr0);
- if (re.test(fp[1])) {
- re = /.$/;
- w = w.replace(re,"");
- }
- }
- else if (re2.test(w)) {
- var fp = re2.exec(w);
- stem = fp[1];
- re2 = new RegExp(s_v);
- if (re2.test(stem)) {
- w = stem;
- re2 = /(at|bl|iz)$/;
- re3 = new RegExp("([^aeiouylsz])\\1$");
- re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
- if (re2.test(w))
- w = w + "e";
- else if (re3.test(w)) {
- re = /.$/;
- w = w.replace(re,"");
- }
- else if (re4.test(w))
- w = w + "e";
- }
- }
-
- // Step 1c
- re = /^(.+?)y$/;
- if (re.test(w)) {
- var fp = re.exec(w);
- stem = fp[1];
- re = new RegExp(s_v);
- if (re.test(stem))
- w = stem + "i";
- }
-
- // Step 2
- re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
- if (re.test(w)) {
- var fp = re.exec(w);
- stem = fp[1];
- suffix = fp[2];
- re = new RegExp(mgr0);
- if (re.test(stem))
- w = stem + step2list[suffix];
- }
-
- // Step 3
- re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
- if (re.test(w)) {
- var fp = re.exec(w);
- stem = fp[1];
- suffix = fp[2];
- re = new RegExp(mgr0);
- if (re.test(stem))
- w = stem + step3list[suffix];
- }
-
- // Step 4
- re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
- re2 = /^(.+?)(s|t)(ion)$/;
- if (re.test(w)) {
- var fp = re.exec(w);
- stem = fp[1];
- re = new RegExp(mgr1);
- if (re.test(stem))
- w = stem;
- }
- else if (re2.test(w)) {
- var fp = re2.exec(w);
- stem = fp[1] + fp[2];
- re2 = new RegExp(mgr1);
- if (re2.test(stem))
- w = stem;
- }
-
- // Step 5
- re = /^(.+?)e$/;
- if (re.test(w)) {
- var fp = re.exec(w);
- stem = fp[1];
- re = new RegExp(mgr1);
- re2 = new RegExp(meq1);
- re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
- if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
- w = stem;
- }
- re = /ll$/;
- re2 = new RegExp(mgr1);
- if (re.test(w) && re2.test(w)) {
- re = /.$/;
- w = w.replace(re,"");
- }
-
- // and turn initial Y back to y
- if (firstch == "y")
- w = firstch.toLowerCase() + w.substr(1);
- return w;
- }
}
-
+const _removeChildren = (element) => {
+ while (element && element.lastChild) element.removeChild(element.lastChild);
+};
/**
- * Simple result scoring code.
+ * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
*/
-var Scorer = {
- // Implement the following function to further tweak the score for each result
- // The function takes a result array [filename, title, anchor, descr, score]
- // and returns the new score.
- /*
- score: function(result) {
- return result[4];
- },
- */
-
- // query matches the full name of an object
- objNameMatch: 11,
- // or matches in the last dotted part of the object name
- objPartialMatch: 6,
- // Additive scores depending on the priority of the object
- objPrio: {0: 15, // used to be importantResults
- 1: 5, // used to be objectResults
- 2: -5}, // used to be unimportantResults
- // Used when the priority is not in the mapping.
- objPrioDefault: 0,
-
- // query found in title
- title: 15,
- // query found in terms
- term: 5
+const _escapeRegExp = (string) =>
+ string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
+
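+// Render a single search hit as a list item. If the hit has no description,
+// the target page is fetched so a short context summary can be built from
+// its text (when SHOW_SEARCH_SUMMARY is enabled).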
+const _displayItem = (item, searchTerms, highlightTerms) => {
+ const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
+ const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
+ const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
+ const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
+ const contentRoot = document.documentElement.dataset.content_root;
+
+ const [docName, title, anchor, descr, score, _filename] = item;
+
+ let listItem = document.createElement("li");
+ let requestUrl;
+ let linkUrl;
+ if (docBuilder === "dirhtml") {
+ // dirhtml builder
+ let dirname = docName + "/";
+ if (dirname.match(/\/index\/$/))
+ dirname = dirname.substring(0, dirname.length - 6);
+ else if (dirname === "index/") dirname = "";
+ requestUrl = contentRoot + dirname;
+ linkUrl = requestUrl;
+ } else {
+ // normal html builders
+ requestUrl = contentRoot + docName + docFileSuffix;
+ linkUrl = docName + docLinkSuffix;
+ }
+ let linkEl = listItem.appendChild(document.createElement("a"));
+ linkEl.href = linkUrl + anchor;
+ linkEl.dataset.score = score;
+ linkEl.innerHTML = title;
+ if (descr) {
+ listItem.appendChild(document.createElement("span")).innerHTML =
+ " (" + descr + ")";
+ // highlight search terms in the description
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+ }
+ else if (showSearchSummary)
+ fetch(requestUrl)
+ .then((responseData) => responseData.text())
+ .then((data) => {
+ if (data)
+ listItem.appendChild(
+ Search.makeSearchSummary(data, searchTerms, anchor)
+ );
+ // highlight search terms in the summary
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+ });
+ Search.output.appendChild(listItem);
+};
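+// Stop the progress pulse and show either the number of matching pages or
+// a "no results" message.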
+const _finishSearch = (resultCount) => {
+ Search.stopPulse();
+ Search.title.innerText = _("Search Results");
+ if (!resultCount)
+ Search.status.innerText = Documentation.gettext(
+ "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
+ );
+ else
+ Search.status.innerText = _(
+ "Search finished, found ${resultCount} page(s) matching the search query."
+ ).replace('${resultCount}', resultCount);
+};
+const _displayNextItem = (
+ results,
+ resultCount,
+ searchTerms,
+ highlightTerms,
+) => {
+ // results left, load the summary and display it
+ // this is intended to be dynamic (don't sub resultsCount)
+ if (results.length) {
+ _displayItem(results.pop(), searchTerms, highlightTerms);
+ setTimeout(
+ () => _displayNextItem(results, resultCount, searchTerms, highlightTerms),
+ 5
+ );
+ }
+ // search finished, update title and status message
+ else _finishSearch(resultCount);
+};
+// Helper function used by query() to order search results.
+// Each input is an array of [docname, title, anchor, descr, score, filename].
+// Order the results by score (in opposite order of appearance, since the
+// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically.
+const _orderResultsByScoreThenName = (a, b) => {
+ const leftScore = a[4];
+ const rightScore = b[4];
+ if (leftScore === rightScore) {
+ // same score: sort alphabetically
+ const leftTitle = a[1].toLowerCase();
+ const rightTitle = b[1].toLowerCase();
+ if (leftTitle === rightTitle) return 0;
+ return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
+ }
+ return leftScore > rightScore ? 1 : -1;
};
-
-
-
-
-var splitChars = (function() {
- var result = {};
- var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
- 1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
- 2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
- 2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
- 3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
- 3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
- 4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
- 8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
- 11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
- 43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
- var i, j, start, end;
- for (i = 0; i < singles.length; i++) {
- result[singles[i]] = true;
- }
- var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
- [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
- [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
- [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
- [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
- [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
- [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
- [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
- [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
- [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
- [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
- [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
- [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
- [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
- [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
- [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
- [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
- [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
- [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
- [4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
- [4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
- [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
- [5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
- [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
- [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
- [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
- [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
- [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
- [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
- [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
- [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
- [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
- [10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
- [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
- [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
- [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
- [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
- [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
- [42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
- [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
- [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
- [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
- [43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
- [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
- [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
- [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
- [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
- [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
- [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
- for (i = 0; i < ranges.length; i++) {
- start = ranges[i][0];
- end = ranges[i][1];
- for (j = start; j <= end; j++) {
- result[j] = true;
- }
- }
- return result;
-})();
-
-function splitQuery(query) {
- var result = [];
- var start = -1;
- for (var i = 0; i < query.length; i++) {
- if (splitChars[query.charCodeAt(i)]) {
- if (start !== -1) {
- result.push(query.slice(start, i));
- start = -1;
- }
- } else if (start === -1) {
- start = i;
- }
- }
- if (start !== -1) {
- result.push(query.slice(start));
- }
- return result;
+/**
+ * Default splitQuery function. Can be overridden in ``sphinx.search`` with a
+ * custom function per language.
+ *
+ * The regular expression works by splitting the string on consecutive characters
+ * that are not Unicode letters, numbers, underscores, or emoji characters.
+ * This is the same as ``\W+`` in Python, preserving the surrogate pair area.
+ */
+if (typeof splitQuery === "undefined") {
+ var splitQuery = (query) => query
+ .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
+ .filter(term => term) // remove remaining empty strings
}
-
-
-
/**
* Search Module
*/
-var Search = {
-
- _index : null,
- _queued_query : null,
- _pulse_status : -1,
-
- init : function() {
- var params = $.getQueryParameters();
- if (params.q) {
- var query = params.q[0];
- $('input[name="q"]')[0].value = query;
- this.performSearch(query);
- }
+const Search = {
+ _index: null,
+ _queued_query: null,
+ _pulse_status: -1,
+
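+  // Convert a fetched HTML page into plain text for the search summary,
+  // preferring the element addressed by `anchor` and falling back to the
+  // [role="main"] content block.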
+ htmlToText: (htmlString, anchor) => {
+ const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
+ for (const removalQuery of [".headerlink", "script", "style"]) {
+ htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() });
+ }
+ if (anchor) {
+ const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`);
+ if (anchorContent) return anchorContent.textContent;
+
+ console.warn(
+ `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.`
+ );
+ }
+
+ // if anchor not specified or not found, fall back to main content
+ const docContent = htmlElement.querySelector('[role="main"]');
+ if (docContent) return docContent.textContent;
+
+ console.warn(
+ "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template."
+ );
+ return "";
},
- loadIndex : function(url) {
- $.ajax({type: "GET", url: url, data: null,
- dataType: "script", cache: true,
- complete: function(jqxhr, textstatus) {
- if (textstatus != "success") {
- document.getElementById("searchindexloader").src = url;
- }
- }});
+ init: () => {
+ const query = new URLSearchParams(window.location.search).get("q");
+ document
+ .querySelectorAll('input[name="q"]')
+ .forEach((el) => (el.value = query));
+ if (query) Search.performSearch(query);
},
- setIndex : function(index) {
- var q;
- this._index = index;
- if ((q = this._queued_query) !== null) {
- this._queued_query = null;
- Search.query(q);
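+  // Load searchindex.js by injecting a <script> tag; the generated index
+  // file is expected to call Search.setIndex() once it runs.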
+ loadIndex: (url) =>
+ (document.body.appendChild(document.createElement("script")).src = url),
+
+ setIndex: (index) => {
+ Search._index = index;
+ if (Search._queued_query !== null) {
+ const query = Search._queued_query;
+ Search._queued_query = null;
+ Search.query(query);
}
},
- hasIndex : function() {
- return this._index !== null;
- },
+ hasIndex: () => Search._index !== null,
- deferQuery : function(query) {
- this._queued_query = query;
- },
+ deferQuery: (query) => (Search._queued_query = query),
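+  // The pulse animates the dots after "Searching" (cycling through zero to
+  // three dots every 500 ms) while the index is still loading.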
- stopPulse : function() {
- this._pulse_status = 0;
- },
+ stopPulse: () => (Search._pulse_status = -1),
- startPulse : function() {
- if (this._pulse_status >= 0)
- return;
- function pulse() {
- var i;
+ startPulse: () => {
+ if (Search._pulse_status >= 0) return;
+
+ const pulse = () => {
Search._pulse_status = (Search._pulse_status + 1) % 4;
- var dotString = '';
- for (i = 0; i < Search._pulse_status; i++)
- dotString += '.';
- Search.dots.text(dotString);
- if (Search._pulse_status > -1)
- window.setTimeout(pulse, 500);
- }
+ Search.dots.innerText = ".".repeat(Search._pulse_status);
+ if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
+ };
pulse();
},
/**
* perform a search for something (or wait until index is loaded)
*/
- performSearch : function(query) {
+ performSearch: (query) => {
// create the required interface elements
- this.out = $('#search-results');
-    this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out);
-    this.dots = $('<span></span>').appendTo(this.title);
-    this.status = $('<p style="display: none"></p>').appendTo(this.out);
-    this.output = $('<ul class="search"/>').appendTo(this.out);
-
- $('#search-progress').text(_('Preparing search...'));
- this.startPulse();
+ const searchText = document.createElement("h2");
+ searchText.textContent = _("Searching");
+ const searchSummary = document.createElement("p");
+ searchSummary.classList.add("search-summary");
+ searchSummary.innerText = "";
+ const searchList = document.createElement("ul");
+ searchList.classList.add("search");
+
+ const out = document.getElementById("search-results");
+ Search.title = out.appendChild(searchText);
+ Search.dots = Search.title.appendChild(document.createElement("span"));
+ Search.status = out.appendChild(searchSummary);
+ Search.output = out.appendChild(searchList);
+
+ const searchProgress = document.getElementById("search-progress");
+ // Some themes don't use the search progress node
+ if (searchProgress) {
+ searchProgress.innerText = _("Preparing search...");
+ }
+ Search.startPulse();
// index already loaded, the browser was quick!
- if (this.hasIndex())
- this.query(query);
- else
- this.deferQuery(query);
+ if (Search.hasIndex()) Search.query(query);
+ else Search.deferQuery(query);
},
- /**
- * execute search (requires search index to be loaded)
- */
- query : function(query) {
- var i;
- var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
-
- // stem the searchterms and add them to the correct list
- var stemmer = new Stemmer();
- var searchterms = [];
- var excluded = [];
- var hlterms = [];
- var tmp = splitQuery(query);
- var objectterms = [];
- for (i = 0; i < tmp.length; i++) {
- if (tmp[i] !== "") {
- objectterms.push(tmp[i].toLowerCase());
- }
+ _parseQuery: (query) => {
+ // stem the search terms and add them to the correct list
+ const stemmer = new Stemmer();
+ const searchTerms = new Set();
+ const excludedTerms = new Set();
+ const highlightTerms = new Set();
+ const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
+ splitQuery(query.trim()).forEach((queryTerm) => {
+ const queryTermLower = queryTerm.toLowerCase();
+
+ // maybe skip this "word"
+ // stopwords array is from language_data.js
+ if (
+ stopwords.indexOf(queryTermLower) !== -1 ||
+ queryTerm.match(/^\d+$/)
+ )
+ return;
- if ($u.indexOf(stopwords, tmp[i].toLowerCase()) != -1 || tmp[i].match(/^\d+$/) ||
- tmp[i] === "") {
- // skip this "word"
- continue;
- }
// stem the word
- var word = stemmer.stemWord(tmp[i].toLowerCase());
- // prevent stemmer from cutting word smaller than two chars
- if(word.length < 3 && tmp[i].length >= 3) {
- word = tmp[i];
- }
- var toAppend;
+ let word = stemmer.stemWord(queryTermLower);
// select the correct list
- if (word[0] == '-') {
- toAppend = excluded;
- word = word.substr(1);
- }
+ if (word[0] === "-") excludedTerms.add(word.substr(1));
else {
- toAppend = searchterms;
- hlterms.push(tmp[i].toLowerCase());
+ searchTerms.add(word);
+ highlightTerms.add(queryTermLower);
}
- // only add if not already in the list
- if (!$u.contains(toAppend, word))
- toAppend.push(word);
+ });
+
+ if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
+ localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
}
- var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" "));
- // console.debug('SEARCH: searching for:');
- // console.info('required: ', searchterms);
- // console.info('excluded: ', excluded);
+ // console.debug("SEARCH: searching for:");
+ // console.info("required: ", [...searchTerms]);
+ // console.info("excluded: ", [...excludedTerms]);
- // prepare search
- var terms = this._index.terms;
- var titleterms = this._index.titleterms;
+ return [query, searchTerms, excludedTerms, highlightTerms, objectTerms];
+ },
- // array of [filename, title, anchor, descr, score]
- var results = [];
- $('#search-progress').empty();
+ /**
+ * execute search (requires search index to be loaded)
+ */
+ _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+ const allTitles = Search._index.alltitles;
+ const indexEntries = Search._index.indexentries;
+
+ // Collect multiple result groups to be sorted separately and then ordered.
+ // Each is an array of [docname, title, anchor, descr, score, filename].
+ const normalResults = [];
+ const nonMainIndexResults = [];
+
+ _removeChildren(document.getElementById("search-progress"));
+
+ const queryLower = query.toLowerCase().trim();
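+    // Match the query against document and section titles; the query must
+    // cover at least half of a title for it to count as a hit.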
+ for (const [title, foundTitles] of Object.entries(allTitles)) {
+ if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) {
+ for (const [file, id] of foundTitles) {
+ const score = Math.round(Scorer.title * queryLower.length / title.length);
+ const boost = titles[file] === title ? 1 : 0; // add a boost for document titles
+ normalResults.push([
+ docNames[file],
+ titles[file] !== title ? `${titles[file]} > ${title}` : title,
+ id !== null ? "#" + id : "",
+ null,
+ score + boost,
+ filenames[file],
+ ]);
+ }
+ }
+ }
- // lookup as object
- for (i = 0; i < objectterms.length; i++) {
- var others = [].concat(objectterms.slice(0, i),
- objectterms.slice(i+1, objectterms.length));
- results = results.concat(this.performObjectSearch(objectterms[i], others));
+ // search for explicit entries in index directives
+ for (const [entry, foundEntries] of Object.entries(indexEntries)) {
+ if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
+ for (const [file, id, isMain] of foundEntries) {
+ const score = Math.round(100 * queryLower.length / entry.length);
+ const result = [
+ docNames[file],
+ titles[file],
+ id ? "#" + id : "",
+ null,
+ score,
+ filenames[file],
+ ];
+ if (isMain) {
+ normalResults.push(result);
+ } else {
+ nonMainIndexResults.push(result);
+ }
+ }
+ }
}
+ // lookup as object
+ objectTerms.forEach((term) =>
+ normalResults.push(...Search.performObjectSearch(term, objectTerms))
+ );
+
// lookup as search terms in fulltext
- results = results.concat(this.performTermsSearch(searchterms, excluded, terms, titleterms));
+ normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms));
// let the scorer override scores with a custom scoring function
if (Scorer.score) {
- for (i = 0; i < results.length; i++)
- results[i][4] = Scorer.score(results[i]);
+ normalResults.forEach((item) => (item[4] = Scorer.score(item)));
+ nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item)));
}
- // now sort the results by score (in opposite order of appearance, since the
- // display function below uses pop() to retrieve items) and then
- // alphabetically
- results.sort(function(a, b) {
- var left = a[4];
- var right = b[4];
- if (left > right) {
- return 1;
- } else if (left < right) {
- return -1;
- } else {
- // same score: sort alphabetically
- left = a[1].toLowerCase();
- right = b[1].toLowerCase();
- return (left > right) ? -1 : ((left < right) ? 1 : 0);
+ // Sort each group of results by score and then alphabetically by name.
+ normalResults.sort(_orderResultsByScoreThenName);
+ nonMainIndexResults.sort(_orderResultsByScoreThenName);
+
+ // Combine the result groups in (reverse) order.
+ // Non-main index entries are typically arbitrary cross-references,
+ // so display them after other results.
+ let results = [...nonMainIndexResults, ...normalResults];
+
+ // remove duplicate search results
+ // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
+ let seen = new Set();
+ results = results.reverse().reduce((acc, result) => {
+ let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
+ if (!seen.has(resultStr)) {
+ acc.push(result);
+ seen.add(resultStr);
}
- });
+ return acc;
+ }, []);
+
+ return results.reverse();
+ },
+
+ query: (query) => {
+ const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query);
+ const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms);
// for debugging
//Search.lastresults = results.slice(); // a copy
- //console.info('search results:', Search.lastresults);
+ // console.info("search results:", Search.lastresults);
// print the results
- var resultCount = results.length;
- function displayNextItem() {
- // results left, load the summary and display it
- if (results.length) {
- var item = results.pop();
-        var listItem = $('<li style="display:none"></li>');
- if (DOCUMENTATION_OPTIONS.FILE_SUFFIX === '') {
- // dirhtml builder
- var dirname = item[0] + '/';
- if (dirname.match(/\/index\/$/)) {
- dirname = dirname.substring(0, dirname.length-6);
- } else if (dirname == 'index/') {
- dirname = '';
- }
- listItem.append($('').attr('href',
- DOCUMENTATION_OPTIONS.URL_ROOT + dirname +
- highlightstring + item[2]).html(item[1]));
- } else {
- // normal html builders
- listItem.append($('').attr('href',
- item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX +
- highlightstring + item[2]).html(item[1]));
- }
- if (item[3]) {
-          listItem.append($('<span> (' + item[3] + ')</span>'));
- Search.output.append(listItem);
- listItem.slideDown(5, function() {
- displayNextItem();
- });
- } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
- var suffix = DOCUMENTATION_OPTIONS.SOURCELINK_SUFFIX;
- if (suffix === undefined) {
- suffix = '.txt';
- }
- $.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[5] + (item[5].slice(-suffix.length) === suffix ? '' : suffix),
- dataType: "text",
- complete: function(jqxhr, textstatus) {
- var data = jqxhr.responseText;
- if (data !== '' && data !== undefined) {
- listItem.append(Search.makeSearchSummary(data, searchterms, hlterms));
- }
- Search.output.append(listItem);
- listItem.slideDown(5, function() {
- displayNextItem();
- });
- }});
- } else {
- // no source available, just display title
- Search.output.append(listItem);
- listItem.slideDown(5, function() {
- displayNextItem();
- });
- }
- }
- // search finished, update title and status message
- else {
- Search.stopPulse();
- Search.title.text(_('Search Results'));
- if (!resultCount)
- Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.'));
- else
- Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount));
- Search.status.fadeIn(500);
- }
- }
- displayNextItem();
+ _displayNextItem(results, results.length, searchTerms, highlightTerms);
},
/**
* search for object names
*/
- performObjectSearch : function(object, otherterms) {
- var filenames = this._index.filenames;
- var docnames = this._index.docnames;
- var objects = this._index.objects;
- var objnames = this._index.objnames;
- var titles = this._index.titles;
-
- var i;
- var results = [];
-
- for (var prefix in objects) {
- for (var name in objects[prefix]) {
- var fullname = (prefix ? prefix + '.' : '') + name;
- if (fullname.toLowerCase().indexOf(object) > -1) {
- var score = 0;
- var parts = fullname.split('.');
- // check for different match types: exact matches of full name or
- // "last name" (i.e. last dotted part)
- if (fullname == object || parts[parts.length - 1] == object) {
- score += Scorer.objNameMatch;
- // matches in last name
- } else if (parts[parts.length - 1].indexOf(object) > -1) {
- score += Scorer.objPartialMatch;
- }
- var match = objects[prefix][name];
- var objname = objnames[match[1]][2];
- var title = titles[match[0]];
- // If more than one term searched for, we require other words to be
- // found in the name/title/description
- if (otherterms.length > 0) {
- var haystack = (prefix + ' ' + name + ' ' +
- objname + ' ' + title).toLowerCase();
- var allfound = true;
- for (i = 0; i < otherterms.length; i++) {
- if (haystack.indexOf(otherterms[i]) == -1) {
- allfound = false;
- break;
- }
- }
- if (!allfound) {
- continue;
- }
- }
- var descr = objname + _(', in ') + title;
-
- var anchor = match[3];
- if (anchor === '')
- anchor = fullname;
- else if (anchor == '-')
- anchor = objnames[match[1]][1] + '-' + fullname;
- // add custom score for some objects according to scorer
- if (Scorer.objPrio.hasOwnProperty(match[2])) {
- score += Scorer.objPrio[match[2]];
- } else {
- score += Scorer.objPrioDefault;
- }
- results.push([docnames[match[0]], fullname, '#'+anchor, descr, score, filenames[match[0]]]);
- }
+ performObjectSearch: (object, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const objects = Search._index.objects;
+ const objNames = Search._index.objnames;
+ const titles = Search._index.titles;
+
+ const results = [];
+
+ const objectSearchCallback = (prefix, match) => {
+ const name = match[4]
+ const fullname = (prefix ? prefix + "." : "") + name;
+ const fullnameLower = fullname.toLowerCase();
+ if (fullnameLower.indexOf(object) < 0) return;
+
+ let score = 0;
+ const parts = fullnameLower.split(".");
+
+ // check for different match types: exact matches of full name or
+ // "last name" (i.e. last dotted part)
+ if (fullnameLower === object || parts.slice(-1)[0] === object)
+ score += Scorer.objNameMatch;
+ else if (parts.slice(-1)[0].indexOf(object) > -1)
+ score += Scorer.objPartialMatch; // matches in last name
+
+ const objName = objNames[match[1]][2];
+ const title = titles[match[0]];
+
+ // If more than one term searched for, we require other words to be
+ // found in the name/title/description
+ const otherTerms = new Set(objectTerms);
+ otherTerms.delete(object);
+ if (otherTerms.size > 0) {
+ const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
+ if (
+ [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
+ )
+ return;
}
- }
+ let anchor = match[3];
+ if (anchor === "") anchor = fullname;
+ else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
+
+ const descr = objName + _(", in ") + title;
+
+ // add custom score for some objects according to scorer
+ if (Scorer.objPrio.hasOwnProperty(match[2]))
+ score += Scorer.objPrio[match[2]];
+ else score += Scorer.objPrioDefault;
+
+ results.push([
+ docNames[match[0]],
+ fullname,
+ "#" + anchor,
+ descr,
+ score,
+ filenames[match[0]],
+ ]);
+ };
+ Object.keys(objects).forEach((prefix) =>
+ objects[prefix].forEach((array) =>
+ objectSearchCallback(prefix, array)
+ )
+ );
return results;
},
/**
* search for full-text terms in the index
*/
- performTermsSearch : function(searchterms, excluded, terms, titleterms) {
- var docnames = this._index.docnames;
- var filenames = this._index.filenames;
- var titles = this._index.titles;
+ performTermsSearch: (searchTerms, excludedTerms) => {
+ // prepare search
+ const terms = Search._index.terms;
+ const titleTerms = Search._index.titleterms;
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
- var i, j, file;
- var fileMap = {};
- var scoreMap = {};
- var results = [];
+ const scoreMap = new Map();
+ const fileMap = new Map();
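+    // scoreMap: file -> { word: score }; fileMap: file -> words found in it.
+    // Only files that matched every required term (with an allowance for
+    // very short terms) become results.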
// perform the search on the required terms
- for (i = 0; i < searchterms.length; i++) {
- var word = searchterms[i];
- var files = [];
- var _o = [
- {files: terms[word], score: Scorer.term},
- {files: titleterms[word], score: Scorer.title}
+ searchTerms.forEach((word) => {
+ const files = [];
+ const arr = [
+ { files: terms[word], score: Scorer.term },
+ { files: titleTerms[word], score: Scorer.title },
];
+ // add support for partial matches
+ if (word.length > 2) {
+ const escapedWord = _escapeRegExp(word);
+ if (!terms.hasOwnProperty(word)) {
+ Object.keys(terms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: terms[term], score: Scorer.partialTerm });
+ });
+ }
+ if (!titleTerms.hasOwnProperty(word)) {
+ Object.keys(titleTerms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
+ });
+ }
+ }
// no match but word was a required one
- if ($u.every(_o, function(o){return o.files === undefined;})) {
- break;
- }
+ if (arr.every((record) => record.files === undefined)) return;
+
// found search word in contents
- $u.each(_o, function(o) {
- var _files = o.files;
- if (_files === undefined)
- return
-
- if (_files.length === undefined)
- _files = [_files];
- files = files.concat(_files);
-
- // set score for the word in each file to Scorer.term
- for (j = 0; j < _files.length; j++) {
- file = _files[j];
- if (!(file in scoreMap))
- scoreMap[file] = {}
- scoreMap[file][word] = o.score;
- }
+ arr.forEach((record) => {
+ if (record.files === undefined) return;
+
+ let recordFiles = record.files;
+ if (recordFiles.length === undefined) recordFiles = [recordFiles];
+ files.push(...recordFiles);
+
+ // set score for the word in each file
+ recordFiles.forEach((file) => {
+ if (!scoreMap.has(file)) scoreMap.set(file, {});
+ scoreMap.get(file)[word] = record.score;
+ });
});
// create the mapping
- for (j = 0; j < files.length; j++) {
- file = files[j];
- if (file in fileMap)
- fileMap[file].push(word);
- else
- fileMap[file] = [word];
- }
- }
+ files.forEach((file) => {
+ if (!fileMap.has(file)) fileMap.set(file, [word]);
+ else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word);
+ });
+ });
// now check if the files don't contain excluded terms
- for (file in fileMap) {
- var valid = true;
-
+ const results = [];
+ for (const [file, wordList] of fileMap) {
// check if all requirements are matched
- if (fileMap[file].length != searchterms.length)
- continue;
+
+ // as search terms with length < 3 are discarded
+ const filteredTermCount = [...searchTerms].filter(
+ (term) => term.length > 2
+ ).length;
+ if (
+ wordList.length !== searchTerms.size &&
+ wordList.length !== filteredTermCount
+ )
+ continue;
// ensure that none of the excluded terms is in the search result
- for (i = 0; i < excluded.length; i++) {
- if (terms[excluded[i]] == file ||
- titleterms[excluded[i]] == file ||
- $u.contains(terms[excluded[i]] || [], file) ||
- $u.contains(titleterms[excluded[i]] || [], file)) {
- valid = false;
- break;
- }
- }
+ if (
+ [...excludedTerms].some(
+ (term) =>
+ terms[term] === file ||
+ titleTerms[term] === file ||
+ (terms[term] || []).includes(file) ||
+ (titleTerms[term] || []).includes(file)
+ )
+ )
+ break;
- // if we have still a valid result we can add it to the result list
- if (valid) {
- // select one (max) score for the file.
- // for better ranking, we should calculate ranking by using words statistics like basic tf-idf...
- var score = $u.max($u.map(fileMap[file], function(w){return scoreMap[file][w]}));
- results.push([docnames[file], titles[file], '', null, score, filenames[file]]);
- }
+ // select one (max) score for the file.
+ const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
+ // add result to the result list
+ results.push([
+ docNames[file],
+ titles[file],
+ "",
+ null,
+ score,
+ filenames[file],
+ ]);
}
return results;
},
@@ -732,30 +593,28 @@ var Search = {
/**
* helper function to return a node containing the
* search summary for a given text. keywords is a list
- * of stemmed words, hlwords is the list of normal, unstemmed
- * words. the first one is used to find the occurrence, the
- * latter for highlighting it.
+ * of stemmed words.
*/
- makeSearchSummary : function(text, keywords, hlwords) {
- var textLower = text.toLowerCase();
- var start = 0;
- $.each(keywords, function() {
- var i = textLower.indexOf(this.toLowerCase());
- if (i > -1)
- start = i;
- });
- start = Math.max(start - 120, 0);
- var excerpt = ((start > 0) ? '...' : '') +
- $.trim(text.substr(start, 240)) +
- ((start + 240 - text.length) ? '...' : '');
-    var rv = $('<div class="context"></div>').text(excerpt);
- $.each(hlwords, function() {
- rv = rv.highlightText(this, 'highlighted');
- });
- return rv;
- }
+ makeSearchSummary: (htmlText, keywords, anchor) => {
+ const text = Search.htmlToText(htmlText, anchor);
+ if (text === "") return null;
+
+ const textLower = text.toLowerCase();
+ const actualStartPosition = [...keywords]
+ .map((k) => textLower.indexOf(k.toLowerCase()))
+ .filter((i) => i > -1)
+ .slice(-1)[0];
+ const startWithContext = Math.max(actualStartPosition - 120, 0);
+
+ const top = startWithContext === 0 ? "" : "...";
+ const tail = startWithContext + 240 < text.length ? "..." : "";
+
+ let summary = document.createElement("p");
+ summary.classList.add("context");
+ summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
+
+ return summary;
+ },
};
-$(document).ready(function() {
- Search.init();
-});
\ No newline at end of file
+_ready(Search.init);
diff --git a/_static/sphinx_highlight.js b/_static/sphinx_highlight.js
new file mode 100644
index 0000000..8a96c69
--- /dev/null
+++ b/_static/sphinx_highlight.js
@@ -0,0 +1,154 @@
+/* Highlighting utilities for Sphinx HTML documentation. */
+"use strict";
+
+const SPHINX_HIGHLIGHT_ENABLED = true
+
+/**
+ * highlight a given string on a node by wrapping it in
+ * span elements with the given class name.
+ */
+const _highlight = (node, addItems, text, className) => {
+ if (node.nodeType === Node.TEXT_NODE) {
+ const val = node.nodeValue;
+ const parent = node.parentNode;
+ const pos = val.toLowerCase().indexOf(text);
+ if (
+ pos >= 0 &&
+ !parent.classList.contains(className) &&
+ !parent.classList.contains("nohighlight")
+ ) {
+ let span;
+
+ const closestNode = parent.closest("body, svg, foreignObject");
+ const isInSVG = closestNode && closestNode.matches("svg");
+ if (isInSVG) {
+ span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
+ } else {
+ span = document.createElement("span");
+ span.classList.add(className);
+ }
+
+ span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+ const rest = document.createTextNode(val.substr(pos + text.length));
+ parent.insertBefore(
+ span,
+ parent.insertBefore(
+ rest,
+ node.nextSibling
+ )
+ );
+ node.nodeValue = val.substr(0, pos);
+ /* There may be more occurrences of search term in this node. So call this
+ * function recursively on the remaining fragment.
+ */
+ _highlight(rest, addItems, text, className);
+
+ if (isInSVG) {
+ const rect = document.createElementNS(
+ "http://www.w3.org/2000/svg",
+ "rect"
+ );
+ const bbox = parent.getBBox();
+ rect.x.baseVal.value = bbox.x;
+ rect.y.baseVal.value = bbox.y;
+ rect.width.baseVal.value = bbox.width;
+ rect.height.baseVal.value = bbox.height;
+ rect.setAttribute("class", className);
+ addItems.push({ parent: parent, target: rect });
+ }
+ }
+ } else if (node.matches && !node.matches("button, select, textarea")) {
+ node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
+ }
+};
+const _highlightText = (thisNode, text, className) => {
+ let addItems = [];
+ _highlight(thisNode, addItems, text, className);
+ addItems.forEach((obj) =>
+ obj.parent.insertAdjacentElement("beforebegin", obj.target)
+ );
+};
+
+/**
+ * Small JavaScript module for the documentation.
+ */
+const SphinxHighlight = {
+
+ /**
+ * highlight the search words provided in localstorage in the text
+ */
+ highlightSearchWords: () => {
+ if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
+
+ // get and clear terms from localstorage
+ const url = new URL(window.location);
+ const highlight =
+ localStorage.getItem("sphinx_highlight_terms")
+ || url.searchParams.get("highlight")
+ || "";
+ localStorage.removeItem("sphinx_highlight_terms")
+ url.searchParams.delete("highlight");
+ window.history.replaceState({}, "", url);
+
+ // get individual terms from highlight string
+ const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
+ if (terms.length === 0) return; // nothing to do
+
+ // There should never be more than one element matching "div.body"
+ const divBody = document.querySelectorAll("div.body");
+ const body = divBody.length ? divBody[0] : document.querySelector("body");
+ window.setTimeout(() => {
+ terms.forEach((term) => _highlightText(body, term, "highlighted"));
+ }, 10);
+
+ const searchBox = document.getElementById("searchbox");
+ if (searchBox === null) return;
+ searchBox.appendChild(
+ document
+ .createRange()
+ .createContextualFragment(
+ '<p class="highlight-link">' +
+ '<a href="javascript:SphinxHighlight.hideSearchWords()">' +
+ _("Hide Search Matches") +
+ "</a></p>"
+ )
+ );
+ },
+
+ /**
+ * helper function to hide the search marks again
+ */
+ hideSearchWords: () => {
+ document
+ .querySelectorAll("#searchbox .highlight-link")
+ .forEach((el) => el.remove());
+ document
+ .querySelectorAll("span.highlighted")
+ .forEach((el) => el.classList.remove("highlighted"));
+ localStorage.removeItem("sphinx_highlight_terms")
+ },
+
+ initEscapeListener: () => {
+ // only install a listener if it is really needed
+ if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
+
+ document.addEventListener("keydown", (event) => {
+ // bail for input elements
+ if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+ // bail with special keys
+ if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
+ if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
+ SphinxHighlight.hideSearchWords();
+ event.preventDefault();
+ }
+ });
+ },
+};
+
+_ready(() => {
+ /* Do not call highlightSearchWords() when we are on the search page.
+ * It will highlight words from the *previous* search query.
+ */
+ if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords();
+ SphinxHighlight.initEscapeListener();
+});
diff --git a/examples.html b/examples.html
index eb7c54f..0990e14 100644
--- a/examples.html
+++ b/examples.html
@@ -1,27 +1,17 @@
- Nolds examples — Nolds 0.5.1 documentation
+ Nolds examples — Nolds 0.6.0 documentation
@@ -29,195 +19,219 @@
-
-Nolds examples¶
+
+Nolds examples¶
You can run some examples for the functions in nolds with the command
-python -m nolds.examples <key>
where <key>
can be one of the following:
+python -m nolds.examples <key>
where <key>
can be one of the following:
-lyapunov-logistic
shows a bifurcation plot of the logistic map and compares
-the true lyapunov exponent to the estimates obtained with lyap_e
and
-lyap_r
.
-lyapunov-tent
shows the same plot as lyapunov-logistic
, but for the tent
-map.
-profiling
runs a profiling test with the package cProfile
.
-hurst-weron2
plots a reconstruction of figure 2 of the weron 2002 paper
-about the hurst exponent.
-hurst-hist
plots a histogram of hurst exponents obtained for random noise.
-hurst-nvals
creates a plot that compares the results of different choices for nvals
-for the function hurst_rs
.
+lyapunov-logistic
shows a bifurcation plot of the logistic map and compares
+the true lyapunov exponent to the estimates obtained with lyap_e
and
+lyap_r
.
+lyapunov-tent
shows the same plot as lyapunov-logistic
, but for the tent
+map.
+profiling
runs a profiling test with the package cProfile
.
+hurst-weron2
plots a reconstruction of figure 2 of the weron 2002 paper
+about the hurst exponent.
+hurst-hist
plots a histogram of hurst exponents obtained for random noise.
+hurst-nvals
creates a plot that compares the results of different choices for nvals
+for the function hurst_rs
.
+sampen-tol
compares old and new default tolerance values for sampen
.
+hurst_mf_stock
example function recreates a plot from Di Matteo (2003).
+barabasi_1991_figure2
and barabasi_1991_figure3
recreate the respective plots from Barabasi et al. (1991)
+lorenz
calculates all main measures of nolds
for x, y, and z coordinates of a Lorenz plot and compares them to prescribed values from the literature.
-These tests are also available as functions inside the module nolds.examples
.
-
-Functions in nolds.examples
¶
-
--
-
nolds.examples.
plot_lyap
(maptype='logistic')[source]¶
+These tests are also available as functions inside the module nolds.examples
.
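For instance, a minimal sketch (assuming nolds and matplotlib are installed in the current environment) that produces the same figure as the lyapunov-logistic key, but from a Python session:

from nolds import examples

# equivalent to: python -m nolds.examples lyapunov-logistic
examples.plot_lyap(maptype="logistic")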
+
+Functions in nolds.examples
¶
+
+-
+nolds.examples.plot_lyap(maptype='logistic')[source]¶
Plots a bifurcation plot of the given map and superimposes the true
lyapunov exponent as well as the estimates of the largest lyapunov exponent
-obtained by lyap_r
and lyap_e
. The idea for this plot is taken from [ll].
-This function requires the package matplotlib
.
+obtained by lyap_r
and lyap_e
. The idea for this plot is taken
+from [ll].
+This function requires the package matplotlib
.
References:
-
-
-
-[ll] Manfred Füllsack, “Lyapunov exponent”,
-url: http://systems-sciences.uni-graz.at/etextbook/sw2/lyapunov.html
-
-
-
-- Kwargs:
-
-- maptype (str):
-- can be either
"logistic"
for the logistic map or "tent"
for the tent
-map.
+
+
+[ll]
+Manfred Füllsack, “Lyapunov exponent”,
+url: http://systems-sciences.uni-graz.at/etextbook/sw2/lyapunov.html
+
+
+
+- Kwargs:
+- maptype (str):
can be either "logistic"
for the logistic map or "tent"
for the
+tent map.
+
-
--
-
nolds.examples.
profiling
()[source]¶
-Runs a profiling test for the function lyap_e
(mainly used for development)
-This function requires the package cProfile
.
+
+-
+nolds.examples.profiling()[source]¶
+Runs a profiling test for the function lyap_e
(mainly used for
+development)
+This function requires the package cProfile
.
-
--
-
nolds.examples.
weron_2002_figure2
(n=10000)[source]¶
-Recreates figure 2 of [w] comparing the reported values by Weron to the
+
+-
+nolds.examples.weron_2002_figure2(n=10000)[source]¶
+Recreates figure 2 of [w] comparing the reported values by Weron to the
values obtained by the functions in this package.
The experiment consists of n iterations where the hurst exponent of randomly
generated gaussian noise is calculated. This is done with differing sequence
lengths of 256, 512, 1024, …, 65536. The average estimated hurst exponent
over all iterations is plotted for the following configurations:
-weron
is the Anis-Lloyd-corrected Hurst exponent calculated by Weron
-rs50
is the Anis-Lloyd-corrected Hurst exponent calculated by Nolds with
-the same parameters as used by Weron
-weron_raw
is the uncorrected Hurst exponent calculated by Weron
-rs50_raw
is the uncorrected Hurst exponent calculated by Nolds with the
-same parameters as used by Weron
-rsn
is the Anis-Lloyd-corrected Hurst exponent calculated by Nolds with
-the default settings of Nolds
+weron
is the Anis-Lloyd-corrected Hurst exponent calculated by Weron
+rs50
is the Anis-Lloyd-corrected Hurst exponent calculated by Nolds
+with the same parameters as used by Weron
+weron_raw
is the uncorrected Hurst exponent calculated by Weron
+rs50_raw
is the uncorrected Hurst exponent calculated by Nolds with the
+same parameters as used by Weron
+rsn
is the Anis-Lloyd-corrected Hurst exponent calculated by Nolds with
+the default settings of Nolds
The values reported by Weron are only measured from the plot in the PDF
version of the paper and can therefore have some small inaccuracies.
-This function requires the package matplotlib
.
+This function requires the package matplotlib
.
References:
-
-
-
-[w] R. Weron, “Estimating long-range dependence: finite sample
+
+
+[w]
+R. Weron, “Estimating long-range dependence: finite sample
properties and confidence intervals,” Physica A: Statistical Mechanics
-and its Applications, vol. 312, no. 1, pp. 285–299, 2002.
-
-
-
-- Kwargs:
-
-- n (int):
-- number of iterations of the experiment (Weron used 10000, but this takes
-a while)
+and its Applications, vol. 312, no. 1, pp. 285–299, 2002.
+
+
+
+- Kwargs:
+- n (int):
number of iterations of the experiment (Weron used 10000, but this takes
+a while)
+
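Since the full experiment takes a while, a reduced run can serve as a quick preview; the sketch below assumes that a much smaller n still shows the qualitative behavior (the curves will just be noisier, and matplotlib must be installed):

from nolds import examples

# n=10000 recreates the paper, but a small n already gives a rough preview
examples.weron_2002_figure2(n=100)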
-
--
-
nolds.examples.
plot_hurst_hist
()[source]¶
+
+-
+nolds.examples.plot_hurst_hist()[source]¶
Plots a histogram of values obtained for the hurst exponent of uniformly
distributed white noise.
-This function requires the package matplotlib
.
+This function requires the package matplotlib
.
-
--
-
nolds.examples.
hurst_compare_nvals
(data, nvals=None)[source]¶
+
+-
+nolds.examples.hurst_compare_nvals(data, nvals=None)[source]¶
Creates a plot that compares the results of different choices for nvals
for the function hurst_rs.
-
-- Args:
-
-- data (array-like of float):
-- the input data from which the hurst exponent should be estimated
+
+- Args:
+- data (array-like of float):
the input data from which the hurst exponent should be estimated
+
-- Kwargs:
-
-- nvals (array of int):
-- a manually selected value for the nvals parameter that should be plotted
-in comparison to the default choices
+- Kwargs:
+- nvals (array of int):
a manually selected value for the nvals parameter that should be plotted
+in comparison to the default choices
+
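A minimal invocation sketch (requires matplotlib); the input data and the manually selected nvals below are arbitrary illustration values, not recommendations:

import numpy as np
from nolds import examples

# white gaussian noise of length 10000 as test input
data = np.random.normal(size=10000)
examples.hurst_compare_nvals(data, nvals=[100, 200, 400, 800, 1600])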
- ©2016-2018, Christopher Schölzel.
+ ©2016-2024, Christopher Schölzel.
|
- Powered by Sphinx 1.6.6
- & Alabaster 0.7.10
+ Powered by Sphinx 8.0.2
+ & Alabaster 1.0.0
|
-
- Index — Nolds 0.5.1 documentation
+ Index — Nolds 0.6.0 documentation
@@ -51,6 +41,7 @@ Index
| F
| H
| L
+ | M
| P
| Q
| S
@@ -61,10 +52,12 @@ Index
Reference code:
-
-
-
-[le_a] Manfred Füllsack, “Lyapunov exponent”,
-url: http://systems-sciences.uni-graz.at/etextbook/sw2/lyapunov.html
-
-
-
-
-
-[le_b] Steve SIU, Lyapunov Exponents Toolbox (LET),
-url: http://www.mathworks.com/matlabcentral/fileexchange/233-let/content/LET/findlyap.m
-
-
-
-
-
-[le_c] Rainer Hegger, Holger Kantz, and Thomas Schreiber, TISEAN,
-url: http://www.mpipks-dresden.mpg.de/~tisean/Tisean_3.0.1/index.html
-
-
-
-Args:
-
-- data (array-like of float):
-- (scalar) data points
-
-
-Kwargs:
-
-- emb_dim (int):
-- embedding dimension
-- matrix_dim (int):
-- matrix dimension (emb_dim - 1 must be divisible by matrix_dim - 1)
-- min_nb (int):
-- minimal number of neighbors
-(default: min(2 * matrix_dim, matrix_dim + 4))
-- min_tsep (int):
-- minimal temporal separation between two “neighbors”
-- tau (float):
-- step size of the data in seconds
-(normalization scaling factor for exponents)
-- debug_plot (boolean):
-- if True, a histogram matrix of the individual estimates will be shown
-- debug_data (boolean):
-- if True, debugging data will be returned alongside the result
-- plot_file (str):
-- if debug_plot is True and plot_file is not None, the plot will be saved
+vol. 34, no. 6, pp. 4971–4979, 1986.
+
+
+
+Reference code:
+
+[le_a]
+Manfred Füllsack, “Lyapunov exponent”,
+url: http://systems-sciences.uni-graz.at/etextbook/sw2/lyapunov.html
+
+
+[le_b]
+Steve SIU, Lyapunov Exponents Toolbox (LET),
+url: http://www.mathworks.com/matlabcentral/fileexchange/233-let/content/LET/findlyap.m
+
+
+[le_c]
+Rainer Hegger, Holger Kantz, and Thomas Schreiber, TISEAN,
+url: http://www.mpipks-dresden.mpg.de/~tisean/Tisean_3.0.1/index.html
+
+
+
+Args:
+- data (array-like of float):
(scalar) data points
+
+
+
+Kwargs:
+- emb_dim (int):
embedding dimension
+
+- matrix_dim (int):
matrix dimension (emb_dim - 1 must be divisible by matrix_dim - 1)
+
+- min_nb (int):
minimal number of neighbors
+(default: min(2 * matrix_dim, matrix_dim + 4))
+
+- min_tsep (int):
minimal temporal separation between two “neighbors”
+
+- tau (float):
step size of the data in seconds
+(normalization scaling factor for exponents)
+
+- debug_plot (boolean):
if True, a histogram matrix of the individual estimates will be shown
+
+- debug_data (boolean):
if True, debugging data will be returned alongside the result
+
+- plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
-plt.show()
+plt.show()
+
-Returns:
-
-- float array:
-- array of matrix_dim Lyapunov exponents (positive exponents are indicators
-for chaos)
-- 2d-array of floats:
-- only present if debug_data is True: all estimates for the matrix_dim
+
- Returns:
+- float array:
array of matrix_dim Lyapunov exponents (positive exponents are indicators
+for chaos)
+
+- 2d-array of floats:
only present if debug_data is True: all estimates for the matrix_dim
Lyapunov exponents from the x iterations of R_i. The shape of this debug
-data is (x, matrix_dim).
+data is (x, matrix_dim).
+
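As a usage sketch (the embedding settings are illustrative and chosen so that emb_dim - 1 = 9 is divisible by matrix_dim - 1 = 3 as required above), lyap_e applied to the x coordinate of a simulated Lorenz system should yield at least one positive exponent:

import nolds
from nolds.datasets import lorenz_euler

# x coordinate of a Lorenz trajectory with the classic chaotic parameters
x = lorenz_euler(5000, 10, 28, 8.0 / 3)[:, 0]
exponents = nolds.lyap_e(x, emb_dim=10, matrix_dim=4)
print(exponents)  # positive exponents are indicators for chaos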
-
-
-Sample entropy¶
-
--
-
nolds.
sampen
(data, emb_dim=2, tolerance=None, dist=<function rowwise_chebyshev>, debug_plot=False, debug_data=False, plot_file=None)[source]¶
+
+
+Sample entropy¶
+
+-
+nolds.sampen(data, emb_dim=2, tolerance=None, lag=1, dist=<function rowwise_chebyshev>, closed=False, debug_plot=False, debug_data=False, plot_file=None)[source]¶
Computes the sample entropy of the given data.
-
-- Explanation of the sample entropy:
-The sample entropy of a time series is defined as the negative natural
+
+- Explanation of the sample entropy:
The sample entropy of a time series is defined as the negative natural
logarithm of the conditional probability that two sequences similar for
emb_dim points remain similar at the next point, excluding self-matches.
-A lower value for the sample entropy therefore corresponds to a higher
+
A lower value for the sample entropy therefore corresponds to a higher
probability indicating more self-similarity.
-- Explanation of the algorithm:
-- The algorithm constructs all subsequences of length emb_dim
-[s_1, s_2, s_3, …] and then counts each pair (s_i, s_j) with i != j
+
- Explanation of the algorithm:
The algorithm constructs all subsequences of length emb_dim
+[s_1, s_1+lag, s_1+2*lag, …] and then counts each pair (s_i, s_j) with i != j
where dist(s_i, s_j) < tolerance. The same process is repeated for all
subsequences of length emb_dim + 1. The sum of similar sequence pairs
with length emb_dim + 1 is divided by the sum of similar sequence pairs
with length emb_dim. The result of the algorithm is the negative logarithm
-of this ratio/probability.
-- References:
-
-
-
-[se_1] J. S. Richman and J. R. Moorman, “Physiological time-series
+of this ratio/probability.
+
+- References:
-
+
+[se_1]
+
J. S. Richman and J. R. Moorman, “Physiological time-series
analysis using approximate entropy and sample entropy,”
American Journal of Physiology-Heart and Circulatory Physiology,
-vol. 278, no. 6, pp. H2039–H2049, 2000.
-
-
-
-- Reference code:
-
-
-
-[se_a] “sample_entropy” function in R-package “pracma”,
-url: https://cran.r-project.org/web/packages/pracma/pracma.pdf
-
-
-
-- Args:
-
-- data (array-like of float):
-- input data
-
-
-- Kwargs:
-
-- emb_dim (int):
-- the embedding dimension (length of vectors to compare)
-- tolerance (float):
-- distance threshold for two template vectors to be considered equal
-(default: 0.2 * std(data))
-- dist (function (2d-array, 1d-array) -> 1d-array):
-- distance function used to calculate the distance between template
-vectors. Sampen is defined using
rowwise_chebyshev
. You should only use
-something else, if you are sure that you need it.
-- debug_plot (boolean):
-- if True, a histogram of the individual distances for m and m+1
-- debug_data (boolean):
-- if True, debugging data will be returned alongside the result
-- plot_file (str):
-- if debug_plot is True and plot_file is not None, the plot will be saved
+vol. 278, no. 6, pp. H2039–H2049, 2000.
+
+
+
+Reference code:
+
+[se_a]
+“sample_entropy” function in R-package “pracma”,
+url: https://cran.r-project.org/web/packages/pracma/pracma.pdf
+
+
+
+Args:
+- data (array-like of float):
input data
+
+
+
+Kwargs:
+- emb_dim (int):
the embedding dimension (length of vectors to compare)
+
+- tolerance (float):
distance threshold for two template vectors to be considered equal
+(default: 0.2 * std(data) at emb_dim = 2, corrected for dimension effect
+for other values of emb_dim)
+
+- lag (int):
delay for the delay embedding
+
+- dist (function (2d-array, 1d-array) -> 1d-array):
distance function used to calculate the distance between template
+vectors. Sampen is defined using rowwise_chebyshev
. You should only
+use something else if you are sure that you need it.
+
+- closed (boolean):
if True, will check for vector pairs whose distance is in the closed
+interval [0, r] (less or equal to r), otherwise the open interval
+[0, r) (less than r) will be used
+
+- debug_plot (boolean):
if True, a histogram of the individual distances for m and m+1
+
+- debug_data (boolean):
if True, debugging data will be returned alongside the result
+
+- plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
-plt.show()
+plt.show()
+
-Returns:
-
-- float:
-- the sample entropy of the data (negative logarithm of ratio between
-similar template vectors of length emb_dim + 1 and emb_dim)
-- [float list, float list]:
-- Lists of lists of the form
[dists_m, dists_m1]
containing the distances
-between template vectors for m (dists_m) and for m + 1 (dists_m1).
+- Returns:
+- float:
the sample entropy of the data (negative logarithm of ratio between
+similar template vectors of length emb_dim + 1 and emb_dim)
+
+- [c_m, c_m1]:
list of two floats: count of similar template vectors of length emb_dim
+(c_m) and of length emb_dim + 1 (c_m1)
+
+- [float list, float list]:
Lists of lists of the form [dists_m, dists_m1]
containing the
+distances between template vectors for m (dists_m)
+and for m + 1 (dists_m1).
+
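A hedged usage sketch comparing white noise with a highly regular signal (lengths and seed are arbitrary); following the definition above, the more self-similar signal should yield the lower value:

import numpy as np
import nolds

rng = np.random.default_rng(42)
noise = rng.normal(size=2000)
sine = np.sin(np.linspace(0, 20 * np.pi, 2000))

print(nolds.sampen(noise, emb_dim=2))  # comparatively high entropy
print(nolds.sampen(sine, emb_dim=2))   # lower value: more self-similarity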
-
-
-Hurst exponent¶
-
--
-
nolds.
hurst_rs
(data, nvals=None, fit='RANSAC', debug_plot=False, debug_data=False, plot_file=None, corrected=True, unbiased=True)[source]¶
+
+
+Hurst exponent¶
+
+-
+nolds.hurst_rs(data, nvals=None, fit='RANSAC', debug_plot=False, debug_data=False, plot_file=None, corrected=True, unbiased=True)[source]¶
Calculates the Hurst exponent by a standard rescaled range (R/S) approach.
-
-- Explanation of Hurst exponent:
-The Hurst exponent is a measure for the “long-term memory” of a
+
+- Explanation of Hurst exponent:
The Hurst exponent is a measure for the “long-term memory” of a
time series, meaning the long statistical dependencies in the data that do
not originate from cycles.
It originates from H.E. Hurst's observations of the problem of long-term
@@ -422,9 +404,9 @@
Hurst exponentIn this equation, K is called the Hurst exponent. Its value is 0.5 for
+In this equation, K is called the Hurst exponent. Its value is 0.5 for
white noise, but becomes greater for time series that exhibit some positive
dependency on previous values. For negative dependencies it becomes less
than 0.5.
-- Explanation of the algorithm:
-The rescaled range (R/S) approach is directly derived from Hurst’s
+
- Explanation of the algorithm:
The rescaled range (R/S) approach is directly derived from Hurst’s
definition. The time series of length N is split into non-overlapping
subseries of length n. Then, R and S (S = sigma) are calculated for each
subseries and the mean is taken over all subseries yielding (R/S)_n. This
@@ -450,570 +431,795 @@
Hurst exponent
+
-- binary_n: N/2, N/4, N/8, …
-- logarithmic_n: min_n, min_n * f, min_n * f^2, …
+binary_n: N/2, N/4, N/8, …
+logarithmic_n: min_n, min_n * f, min_n * f^2, …
-- References:
-
-
-
-[h_1] H. E. Hurst, “The problem of long-term storage in reservoirs,”
+- References:
-
+
+[h_1]
+
H. E. Hurst, “The problem of long-term storage in reservoirs,”
International Association of Scientific Hydrology. Bulletin, vol. 1,
-no. 3, pp. 13–27, 1956.
-
-
-
-
-
-[h_2] H. E. Hurst, “A suggested statistical model of some time series
-which occur in nature,” Nature, vol. 180, p. 494, 1957.
-
-
-
-
-
-[h_3] (1, 2) R. Weron, “Estimating long-range dependence: finite sample
+no. 3, pp. 13–27, 1956.
+
+
+[h_2]
+H. E. Hurst, “A suggested statistical model of some time series
+which occur in nature,” Nature, vol. 180, p. 494, 1957.
+
+
-
-
-
-- Reference Code:
-
-
-
-[h_a] “hurst” function in R-package “pracma”,
+and its Applications, vol. 312, no. 1, pp. 285–299, 2002.
+
+
+
+- Reference Code:
-
+
+[h_a]
+
“hurst” function in R-package “pracma”,
url: https://cran.r-project.org/web/packages/pracma/pracma.pdf
Note: Pracma yields several estimates of the Hurst exponent, which
are listed below. Unless otherwise stated they use the divisors
of the length of the sequence as n. The length is reduced by at
most 1% to find the value that has the most divisors.
-- The “Simple R/S” estimate is just log((R/S)_n) / log(n) for
-n = N.
-- The “theoretical Hurst exponent” is the value that would be
+
The “Simple R/S” estimate is just log((R/S)_n) / log(n) for
+n = N.
+The “theoretical Hurst exponent” is the value that would be
expected of an uncorrected rescaled range approach for random
-noise of the size of the input data.
-- The “empirical Hurst exponent” is the uncorrected Hurst exponent
-obtained by the rescaled range approach.
-- The “corrected empirical Hurst exponent” is the Anis-Lloyd-Peters
-corrected Hurst exponent, but with sqrt(1/2 * pi * n) added to
-the (R/S)_n before the log.
-- The “corrected R over S Hurst exponent” uses the R-function “lm”
+noise of the size of the input data.
+The “empirical Hurst exponent” is the uncorrected Hurst exponent
+obtained by the rescaled range approach.
+The “corrected empirical Hurst exponent” is the
+Anis-Lloyd-Peters corrected Hurst exponent, but with
+sqrt(1/2 * pi * n) added to the (R/S)_n before the log.
+The “corrected R over S Hurst exponent” uses the R-function “lm”
instead of pracmas own “polyfit” and uses n = N/2, N/4, N/8, …
by successively halving the subsequences (which means that some
subsequences may be one element longer than others). In contrast
to its name it does not use the Anis-Lloyd-Peters correction
-factor.
+factor.
-If you want to compare the output of pracma to the output of
+
If you want to compare the output of pracma to the output of
nolds, the “empirical hurst exponent” is the only measure that
exactly corresponds to the Hurst measure implemented in nolds
(by choosing corrected=False, fit=”poly” and employing the same
strategy for choosing n as the divisors of the (reduced)
sequence length).
-
-
-
-
-
-
-[h_b] Rafael Weron, “HURST: MATLAB function to compute the Hurst
+
+
+[h_b]
+Rafael Weron, “HURST: MATLAB function to compute the Hurst
exponent using R/S Analysis”,
url: https://ideas.repec.org/c/wuu/hscode/m11003.html
-Note: When the same values for nvals are used and fit is set to
+
Note: When the same values for nvals are used and fit is set to
“poly”, nolds yields exactly the same results as this
implementation.
-
-
-
-
-
-
-[h_c] Bill Davidson, “Hurst exponent”,
-url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent
-
-
-
-
-
-[h_d] Tomaso Aste, “Generalized Hurst exponent”,
-url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent
-
-
-
-- Args:
-
-- data (array-like of float):
-- time series
-
-
-- Kwargs:
-
-- nvals (iterable of int):
-sizes of subseries to use
+
+
+[h_c]
+Bill Davidson, “Hurst exponent”,
+url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent
+
+
+
+Args:
+- data (array-like of float):
time series
+
+
+
+Kwargs:
+- nvals (iterable of int):
sizes of subseries to use
(default: logmid_n(total_N, ratio=1/4.0, nsteps=15), that is 15
logarithmically spaced values in the middle 25% of the logarithmic range)
-Generally, the choice for n is a trade-off between the length and the
+
Generally, the choice for n is a trade-off between the length and the
number of the subsequences that are used for the calculation of the
-(R/S)_n. Very low values of n lead to high variance in the r
and s
-while very high values may leave too few subsequences that the mean along
-them is still meaningful. Logarithmic spacing makes sense, because it
-translates to even spacing in the log-log-plot.
+(R/S)_n. Very low values of n lead to high variance in the r
and
+s
while very high values may leave too few subsequences that the mean
+along them is still meaningful. Logarithmic spacing makes sense, because
+it translates to even spacing in the log-log-plot.
-- fit (str):
-- the fitting method to use for the line fit, either ‘poly’ for normal
+
- fit (str):
the fitting method to use for the line fit, either ‘poly’ for normal
least squares polynomial fitting or ‘RANSAC’ for RANSAC-fitting which
-is more robust to outliers
-- debug_plot (boolean):
-- if True, a simple plot of the final line-fitting step will be shown
-- debug_data (boolean):
-- if True, debugging data will be returned alongside the result
-- plot_file (str):
-- if debug_plot is True and plot_file is not None, the plot will be saved
+is more robust to outliers
+
+- debug_plot (boolean):
if True, a simple plot of the final line-fitting step will be shown
+
+- debug_data (boolean):
if True, debugging data will be returned alongside the result
+
+- plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
-plt.show()
-- corrected (boolean):
-- if True, the Anis-Lloyd-Peters correction factor will be applied to the
+
plt.show()
+
+- corrected (boolean):
if True, the Anis-Lloyd-Peters correction factor will be applied to the
output according to the expected value for the individual (R/S)_n
-(see [h_3])
-- unbiased (boolean):
-- if True, the standard deviation based on the unbiased variance
+(see [h_3])
+
+- unbiased (boolean):
if True, the standard deviation based on the unbiased variance
(1/(N-1) instead of 1/N) will be used. This should be the default choice,
since the true mean of the sequences is not known. This parameter should
-only be changed to recreate results of other implementations.
+only be changed to recreate results of other implementations.
+
-Returns:
-
-- float:
-- estimated Hurst exponent K using a rescaled range approach (if K = 0.5
+
- Returns:
+- float:
estimated Hurst exponent K using a rescaled range approach (if K = 0.5
there are no long-range correlations in the data, if K < 0.5 there are
negative long-range correlations, if K > 0.5 there are positive
-long-range correlations)
-- (1d-vector, 1d-vector, list):
-- only present if debug_data is True: debug data of the form
-
(nvals, rsvals, poly)
where nvals
are the values used for log(n),
-rsvals
are the corresponding log((R/S)_n) and poly
are the line
-coefficients ([slope, intercept]
)
+long-range correlations)
+
+- (1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
+(nvals, rsvals, poly)
where nvals
are the values used for log(n),
+rsvals
are the corresponding log((R/S)_n) and poly
are the line
+coefficients ([slope, intercept]
)
+
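As a quick sketch (lengths and seed are arbitrary; the default fit='RANSAC' may require scikit-learn): white noise should give a value near 0.5, and fractional gaussian noise from nolds.datasets.fgn should come out near its prescribed H:

import numpy as np
import nolds
from nolds.datasets import fgn

rng = np.random.default_rng(0)
print(nolds.hurst_rs(rng.normal(size=10000)))  # expected near 0.5 (white noise)
print(nolds.hurst_rs(fgn(10000, H=0.75)))      # expected near the prescribed 0.75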
-
-
-Correlation dimension¶
-
--
-
nolds.
corr_dim
(data, emb_dim, rvals=None, dist=<function rowwise_euclidean>, fit='RANSAC', debug_plot=False, debug_data=False, plot_file=None)[source]¶
+
+
+Correlation dimension¶
+
+-
+nolds.corr_dim(data, emb_dim, lag=1, rvals=None, dist=<function rowwise_euclidean>, fit='RANSAC', debug_plot=False, debug_data=False, plot_file=None)[source]¶
Calculates the correlation dimension with the Grassberger-Procaccia algorithm
-
-- Explanation of correlation dimension:
-The correlation dimension is a characteristic measure that can be used
+
+- Explanation of correlation dimension:
The correlation dimension is a characteristic measure that can be used
to describe the geometry of chaotic attractors. It is defined using the
correlation sum C(r) which is the fraction of pairs of points X_i in the
phase space whose distance is smaller than r.
If the relation between C(r) and r can be described by the power law
C(r) ~ r^D
then D is called the correlation dimension of the system.
-In a d-dimensional system, the maximum value for D is d. This value is
+
In a d-dimensional system, the maximum value for D is d. This value is
obtained for systems that expand uniformly in each dimension with time.
The lowest possible value is 0 for a system with constant C(r) (i.e. a
system that visits just one point in the phase space). Generally if D is
lower than d and the system has an attractor, this attractor is called
“strange” and D is a measure of this “strangeness”.
-- Explanation of the algorithm:
-The Grassberger-Procaccia algorithm calculates C(r) for a range of
+
- Explanation of the algorithm:
The Grassberger-Procaccia algorithm calculates C(r) for a range of
different r and then fits a straight line into the plot of log(C(r))
versus log(r).
-This version of the algorithm is created for one-dimensional (scalar) time
+
This version of the algorithm is created for one-dimensional (scalar) time
series. Therefore, before calculating C(r), a delay embedding of the time
series is performed to yield emb_dim dimensional vectors
-Y_i = [X_i, X_(i+1), X_(i+2), … X_(i+embd_dim-1)]. Choosing a higher
-value for emb_dim allows to reconstruct higher dimensional dynamics and
-avoids “systematic errors due to corrections to scaling”.
-
-- References:
-
-
-
-[cd_1] P. Grassberger and I. Procaccia, “Characterization of strange
+Y_i = [X_i, X_(i+1*lag), X_(i+2*lag), … X_(i+(emb_dim-1)*lag)]. Choosing
+a higher value for emb_dim makes it possible to reconstruct higher
+dimensional dynamics and avoids “systematic errors due to corrections to
+scaling”. Choosing a higher value for lag helps to avoid overestimating
+correlation because X_i ~= X_(i+1), but lag should also not be set too high,
+since then correlation is underestimated due to the exponential divergence of
+trajectories in chaotic systems.
+
+- References:
-
+
+[cd_1]
+
P. Grassberger and I. Procaccia, “Characterization of strange
attractors,” Physical review letters, vol. 50, no. 5, p. 346,
-1983.
-
-
-
-
-
-[cd_2] P. Grassberger and I. Procaccia, “Measuring the strangeness of
+1983.
+
+
+[cd_2]
+P. Grassberger and I. Procaccia, “Measuring the strangeness of
strange attractors,” Physica D: Nonlinear Phenomena, vol. 9,
-no. 1, pp. 189–208, 1983.
-
-
-
-
-
-[cd_3] P. Grassberger, “Grassberger-Procaccia algorithm,”
+no. 1, pp. 189–208, 1983.
+
+
+[cd_3]
+P. Grassberger, “Grassberger-Procaccia algorithm,”
Scholarpedia, vol. 2, no. 5, p. 3043.
-urL: http://www.scholarpedia.org/article/Grassberger-Procaccia_algorithm
-
-
-
-- Reference Code:
-
-
-
-[cd_a] “corrDim” function in R package “fractal”,
-url: https://cran.r-project.org/web/packages/fractal/fractal.pdf
-
-
-
-
-
-[cd_b] Peng Yuehua, “Correlation dimension”,
-url: http://de.mathworks.com/matlabcentral/fileexchange/24089-correlation-dimension
-
-
-
-- Args:
-
-- data (array-like of float):
-- time series of data points
-- emb_dim (int):
-- embedding dimension
-
-
-- Kwargs:
-
-- rvals (iterable of float):
-- list of values for to use for r
-(default: logarithmic_r(0.1 * std, 0.5 * std, 1.03))
-- dist (function (2d-array, 1d-array) -> 1d-array):
-- row-wise difference function
-- fit (str):
-- the fitting method to use for the line fit, either ‘poly’ for normal
+url: http://www.scholarpedia.org/article/Grassberger-Procaccia_algorithm
+
+
+
+
Reference Code:
+
+[cd_a]
+“corrDim” function in R package “fractal”,
+url: https://cran.r-project.org/web/packages/fractal/fractal.pdf
+
+
+[cd_b]
+Peng Yuehua, “Correlation dimension”,
+url: http://de.mathworks.com/matlabcentral/fileexchange/24089-correlation-dimension
+
+
+
+Args:
+- data (array-like of float):
time series of data points
+
+- emb_dim (int):
embedding dimension
+
+
+
+Kwargs:
+- rvals (iterable of float):
list of values for to use for r
+(default: logarithmic_r(0.1 * std, 0.5 * std, 1.03))
+
+- dist (function (2d-array, 1d-array) -> 1d-array):
row-wise difference function
+
+- fit (str):
the fitting method to use for the line fit, either ‘poly’ for normal
least squares polynomial fitting or ‘RANSAC’ for RANSAC-fitting which
-is more robust to outliers
-- debug_plot (boolean):
-- if True, a simple plot of the final line-fitting step will be shown
-- debug_data (boolean):
-- if True, debugging data will be returned alongside the result
-- plot_file (str):
-- if debug_plot is True and plot_file is not None, the plot will be saved
+is more robust to outliers
+
+- debug_plot (boolean):
if True, a simple plot of the final line-fitting step will be shown
+
+- debug_data (boolean):
if True, debugging data will be returned alongside the result
+
+- plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
-plt.show()
+plt.show()
+
-Returns:
-
-- float:
-- correlation dimension as slope of the line fitted to log(r) vs log(C(r))
-- (1d-vector, 1d-vector, list):
-- only present if debug_data is True: debug data of the form
-
(rvals, csums, poly)
where rvals
are the values used for log(r),
-csums
are the corresponding log(C(r)) and poly
are the line
-coefficients ([slope, intercept]
)
+- Returns:
+- float:
correlation dimension as slope of the line fitted to log(r) vs log(C(r))
+
+- (1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
+(rvals, csums, poly)
where rvals
are the values used for log(r),
+csums
are the corresponding log(C(r)) and poly
are the line
+coefficients ([slope, intercept]
)
+
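A usage sketch on the x coordinate of a simulated Lorenz system (emb_dim and lag are illustrative choices, not tuned values; the default fit='RANSAC' may require scikit-learn):

import nolds
from nolds.datasets import lorenz_euler

x = lorenz_euler(5000, 10, 28, 8.0 / 3)[:, 0]
d = nolds.corr_dim(x, emb_dim=5, lag=5)
print(d)  # a non-integer value below emb_dim hints at a "strange" attractor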
-
-
-Detrended fluctuation analysis¶
-
--
-
nolds.
dfa
(data, nvals=None, overlap=True, order=1, fit_trend='poly', fit_exp='RANSAC', debug_plot=False, debug_data=False, plot_file=None)[source]¶
+
+
+Detrended fluctuation analysis¶
+
+-
+nolds.dfa(data, nvals=None, overlap=True, order=1, fit_trend='poly', fit_exp='RANSAC', debug_plot=False, debug_data=False, plot_file=None)[source]¶
Performs a detrended fluctuation analysis (DFA) on the given data
-
-- Recommendations for parameter settings by Hardstone et al.:
-
-- nvals should be equally spaced on a logarithmic scale so that each window
-scale hase the same weight
-- min(nvals) < 4 does not make much sense as fitting a polynomial (even if
-it is only of order 1) to 3 or less data points is very prone.
-- max(nvals) > len(data) / 10 does not make much sense as we will then have
-less than 10 windows to calculate the average fluctuation
-- use overlap=True to obtain more windows and therefore better statistics
-(at an increased computational cost)
+
+- Recommendations for parameter settings by Hardstone et al.:
+nvals should be equally spaced on a logarithmic scale so that each window
+scale has the same weight
+min(nvals) < 4 does not make much sense as fitting a polynomial (even if
+it is only of order 1) to 3 or fewer data points is very prone to errors.
+max(nvals) > len(data) / 10 does not make much sense as we will then have
+less than 10 windows to calculate the average fluctuation
+use overlap=True to obtain more windows and therefore better statistics
+(at an increased computational cost)
-- Explanation of DFA:
-Detrended fluctuation analysis, much like the Hurst exponent, is used to
-find long-term statistical dependencies in time series.
-The idea behind DFA originates from the definition of self-affine
-processes. A process X is said to be self-affine if the standard deviation
-of the values within a window of length n changes with the window length
-factor L in a power law:
-std(X,L * n) = L^H * std(X, n)
-where std(X, k) is the standard deviation of the process X calculated over
-windows of size k. In this equation, H is called the Hurst parameter, which
-behaves indeed very similar to the Hurst exponent.
-Like the Hurst exponent, H can be obtained from a time series by
-calculating std(X,n) for different n and fitting a straight line to the
-plot of log(std(X,n)) versus log(n).
-To calculate a single std(X,n), the time series is split into windows of
-equal length n, so that the ith window of this size has the form
-W_(n,i) = [x_i, x_(i+1), x_(i+2), … x_(i+n-1)]
-The value std(X,n) is then obtained by calculating std(W_(n,i)) for each i
-and averaging the obtained values over i.
-The aforementioned definition of self-affinity, however, assumes that the
-process is non-stationary (i.e. that the standard deviation changes over
-time) and it is highly influenced by local and global trends of the time
-series.
-To overcome these problems, an estimate alpha of H is calculated by using a
-“walk” or “signal profile” instead of the raw time series. This walk is
-obtained by substracting the mean and then taking the cumulative sum of the
-original time series. The local trends are removed for each window
-separately by fitting a polynomial p_(n,i) to the window W_(n,i) and then
-calculating W’_(n,i) = W_(n,i) - p_(n,i) (element-wise substraction).
-We then calculate std(X,n) as before only using the “detrended” window
-W’_(n,i) instead of W_(n,i). Instead of H we obtain the parameter alpha
-from the line fitting.
-For alpha < 1 the underlying process is stationary and can be modelled as
-fractional Gaussian noise with H = alpha. This means for alpha = 0.5 we
-have no correlation or “memory”, for 0.5 < alpha < 1 we have a memory with
-positive correlation and for alpha < 0.5 the correlation is negative.
-For alpha > 1 the underlying process is non-stationary and can be modeled
+
- Explanation of DFA:
Detrended fluctuation analysis, much like the Hurst exponent, is used to
+find long-term statistical dependencies in time series. However, while the
+Hurst exponent will indicate long-term correlations for any non-stationary
+process (i.e. a stochastic process whose probability distribution changes
+when shifted in time, such as a random walk whose mean changes over time),
+DFA was designed to distinguish between correlations that are purely an
+artifact of non-stationarity and those that show inherent long-term
+behavior of the studied system.
+Mathematically, the long-term correlations that we are interested in can
+be characterized using the autocorrelation function C(s). For a time series
+(x_i) with i = 1, …, N it is defined as follows:
+C(s) = 1/(N-s) * (y_1 * y_1+s + y_2 * y_2+s + … y_(N-s) * y_N)
+with y_i = x_i - mean(x). If there are no correlations at all, C(s) would
+be zero for s > 0. For short-range correlations, C(s) will decline
+exponentially, but for long-term correlations the decline follows a power
+law of the form C(s) ~ s^(-gamma) instead with 0 < gamma < 1.
+Due to noise and underlying trends, calculating C(s) directly is usually not
+feasible. The main idea of DFA is therefore to remove trends up to a given
+order from the input data and analyze the remaining fluctuations. Trends
+in this sense are smooth signals with monotonous or slowly oscillating
+behavior that are caused by external effects and not the dynamical system
+under study.
+To get a hold of these trends, the first step is to calculate the “profile”
+of our time series as the cumulative sum of deviations from the mean,
+effectively integrating our data. This both smoothes out measurement noise
+and makes it easier to distinguish the fractal properties of bounded time
+series (i.e. time series whose values cannot grow or shrink beyond certain
+bounds such as most biological or physical signals) by applying random walk
+theory (see [dfa_3] and [dfa_4]).
+y_i = x_1 - mean(x) + x_2 - mean(x) + … + x_i - mean(x).
+After that, we split Y(i) into (usually non-overlapping) windows of length
+n to calculate local trends at this given scale. The ith window of this
+size has the form
+W_(n,i) = [y_i, y_(i+1), y_(i+2), … y_(i+n-1)]
+The local trends are then removed for each window separately by fitting a
+polynomial p_(n,i) to the window W_(n,i) and then calculating
+W’_(n,i) = W_(n,i) - p_(n,i) (element-wise subtraction).
+This leaves us with the deviations from the trend - the “fluctuations” -
+that we are interested in. To quantify them, we take the root mean square
+of these fluctuations. It is important to note that we have to sum up all
+individual fluctuations across all windows and divide by the total number
+of fluctuations here before finally taking the root as the last step. Some
+implementations apply another root per window, which skews the result.
+The resulting fluctuation F(n) is then only dependent on the window size n,
+the scale at which we observe our data. It behaves similarly to the
+autocorrelation function in that it follows a power-law for long-term
+correlations:
+F(n) ~ n^alpha
+Where alpha is the Hurst parameter, which we can obtain from fitting a line
+into the plot of log(n) versus log(F(n)) and taking the slope.
+The result can be interpreted as follows: For alpha < 1 the underlying
+process is stationary and can be modelled as fractional Gaussian noise with
+H = alpha. This means for alpha = 0.5 we have no long-term correlation or
+“memory”, for 0.5 < alpha < 1 we have positive long-term correlations and
+for alpha < 0.5 the long-term correlations are negative.
+For alpha > 1 the underlying process is non-stationary and can be modeled
as fractional Brownian motion with H = alpha - 1.
-- References:
-
-
-
-[dfa_1] C.-K. Peng, S. V. Buldyrev, S. Havlin, M. Simons,
+- References:
-
+
+[dfa_1]
+
C.-K. Peng, S. V. Buldyrev, S. Havlin, M. Simons,
H. E. Stanley, and A. L. Goldberger, “Mosaic organization of
-DNA nucleotides,” Physical Review E, vol. 49, no. 2, 1994.
-
-
-
-
-
-[dfa_2] R. Hardstone, S.-S. Poil, G. Schiavone, R. Jansen,
+DNA nucleotides,” Physical Review E, vol. 49, no. 2, 1994.
+
+
+[dfa_2]
+J. W. Kantelhardt, E. Koscielny-Bunde, H. H. A. Rego, S.
+Havlin, and A. Bunde, “Detecting long-range correlations with
+detrended fluctuation analysis,” Physica A: Statistical
+Mechanics and its Applications, vol. 295, no. 3–4, pp. 441–454,
+Jun. 2001, doi: 10.1016/S0378-4371(01)00144-3.
+
+
+[dfa_3]
+C. Peng, J. M. Hausdorff, and A. L. Goldberger, “Fractal
+mechanisms in neuronal control: human heartbeat and gait
+dynamics in health and disease,” in Self-Organized Biological
+Dynamics and Nonlinear Control, 1st ed., J. Walleczek, Ed.,
+Cambridge University Press, 2000, pp. 66–96.
+doi: 10.1017/CBO9780511535338.006.
+
+
+[dfa_4]
+A. Bashan, R. Bartsch, J. W. Kantelhardt, and S. Havlin,
+“Comparison of detrending methods for fluctuation analysis,”
+Physica A: Statistical Mechanics and its Applications, vol. 387,
+no. 21, pp. 5080–5090, Sep. 2008,
+doi: 10.1016/j.physa.2008.04.023.
+
+
+[dfa_5]
+R. Hardstone, S.-S. Poil, G. Schiavone, R. Jansen,
V. V. Nikulin, H. D. Mansvelder, and K. Linkenkaer-Hansen,
“Detrended fluctuation analysis: A scale-free view on neuronal
-oscillations,” Frontiers in Physiology, vol. 30, 2012.
-
-
-
-- Reference code:
-
-
-
-[dfa_a] Peter Jurica, “Introduction to MDFA in Python”,
-url: http://bsp.brain.riken.jp/~juricap/mdfa/mdfaintro.html
-
-
-
-
-
-[dfa_b] JE Mietus, “dfa”,
-url: https://www.physionet.org/physiotools/dfa/dfa-1.htm
-
-
-
-
-
-[dfa_c] “DFA” function in R package “fractal”
-
-
-
-- Args:
-
-- data (array-like of float):
-- time series
-
-
-- Kwargs:
-
-- nvals (iterable of int):
-- subseries sizes at which to calculate fluctuation
-(default: logarithmic_n(4, 0.1*len(data), 1.2))
-- overlap (boolean):
-- if True, the windows W_(n,i) will have a 50% overlap,
-otherwise non-overlapping windows will be used
-- order (int):
-- (polynomial) order of trend to remove
-- fit_trend (str):
-- the fitting method to use for fitting the trends, either ‘poly’
+oscillations,” Frontiers in Physiology, vol. 30, 2012.
+
+
+
+
Reference code:
+
+[dfa_a]
+Peter Jurica, “Introduction to MDFA in Python”,
+url: http://bsp.brain.riken.jp/~juricap/mdfa/mdfaintro.html
+
+
+[dfa_b]
+JE Mietus, “dfa”,
+url: https://www.physionet.org/physiotools/dfa/dfa-1.htm
+
+
+[dfa_c]
+“DFA” function in R package “fractal”
+
+
+
+Args:
+- data (array-like of float):
time series
+
+
+
+Kwargs:
+- nvals (iterable of int):
subseries sizes at which to calculate fluctuation
+(default: logarithmic_n(4, 0.1*len(data), 1.2))
+
+- overlap (boolean):
if True, the windows W_(n,i) will have a 50% overlap,
+otherwise non-overlapping windows will be used
+
+- order (int):
(polynomial) order of trend to remove
+
+- fit_trend (str):
the fitting method to use for fitting the trends, either ‘poly’
for normal least squares polynomial fitting or ‘RANSAC’ for
RANSAC-fitting which is more robust to outliers but also tends to
-lead to unstable results
-- fit_exp (str):
-- the fitting method to use for the line fit, either ‘poly’ for normal
+lead to unstable results
+
+- fit_exp (str):
the fitting method to use for the line fit, either ‘poly’ for normal
least squares polynomial fitting or ‘RANSAC’ for RANSAC-fitting which
-is more robust to outliers
-- debug_plot (boolean):
-- if True, a simple plot of the final line-fitting step will be shown
-- debug_data (boolean):
-- if True, debugging data will be returned alongside the result
-- plot_file (str):
-- if debug_plot is True and plot_file is not None, the plot will be saved
+is more robust to outliers
+
+- debug_plot (boolean):
if True, a simple plot of the final line-fitting step will be shown
+
+- debug_data (boolean):
if True, debugging data will be returned alongside the result
+
+- plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
under the given file name instead of directly showing it through
-plt.show()
+plt.show()
+
-Returns:
-
-- float:
-- the estimate alpha for the Hurst parameter (alpha < 1: stationary
+
- Returns:
+- float:
the estimate alpha for the Hurst parameter (alpha < 1: stationary
process similar to fractional Gaussian noise with H = alpha,
alpha > 1: non-stationary process similar to fractional Brownian
-motion with H = alpha - 1)
-- (1d-vector, 1d-vector, list):
-- only present if debug_data is True: debug data of the form
-
(nvals, fluctuations, poly)
where nvals
are the values used for
-log(n), fluctuations
are the corresponding log(std(X,n)) and poly
-are the line coefficients ([slope, intercept]
)
+motion with H = alpha - 1)
+
+- (1d-vector, 1d-vector, list):
only present if debug_data is True: debug data of the form
+(nvals, fluctuations, poly)
where nvals
are the values used for
+log(n), fluctuations
are the corresponding log(std(X,n)) and poly
+are the line coefficients ([slope, intercept]
)
+
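A short sketch of the two regimes described above (lengths and seed are arbitrary; the default fit_exp='RANSAC' may require scikit-learn): white noise should give alpha near 0.5, while its cumulative sum, a random walk and thus Brownian motion with H = 0.5, should give alpha near 1.5:

import numpy as np
import nolds

rng = np.random.default_rng(1)
noise = rng.normal(size=10000)
walk = np.cumsum(noise)  # integrated white noise = random walk

print(nolds.dfa(noise))  # expected near alpha = 0.5 (no long-term memory)
print(nolds.dfa(walk))   # expected near alpha = 1.5 (non-stationary, H = alpha - 1)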
+
+
+Generalized Hurst Exponent (Barabási et al.)¶
+
+-
+nolds.mfhurst_b(data, qvals=None, dists=None, fit='poly', debug_plot=False, debug_data=False, plot_file=None)[source]¶
+Calculates the Generalized Hurst Exponent H_q for different q according to
+A.-L. Barabási and T. Vicsek.
+
+- Explanation of the Generalized Hurst Exponent:
The Generalized Hurst Exponent (GHE, H_q or H(q)) can (as the name implies)
+be seen as a generalization of the Hurst exponent for data series with
+multifractal properties. Its origins are, however, not directly related
+to Hurst’s rescaled range approach, but to the definition of self-affine
+functions.
+A single-valued self-affine function h by definition satisfies the relation
+
+h(x) ~= lambda^(-H) h(lambda x)
+
+for any positive real valued lambda and some positive real valued exponent
+H, which is called the Hurst, Hölder, Hurst-Hölder or roughness exponent
+in the literature. In other words you can view lambda as a scaling factor
+or “step size”. With lambda < 1 we decrease the step size and zoom into our
+function. In this case lambda^(-H) becomes greater than one, meaning that
+h(lambda x) looks similar to a smaller version of h(x). With lambda > 1 we
+zoom out and get lambda^(-H) < 1.
+To calculate H, you can use the height-height correlation function (also
+called autocorrelation) c(d) = <(h(x) - h(x + d))^2>_x where <…>_x
+denotes the expected value over x. Here, the aforementioned self-affine
+property is equivalent to c(d) ~ d^(2H). You can also think of d as a step
+size. Increasing or decreasing d from 1 to some y is the same as setting
+lambda = y: It increases or decreases the scale of the function by a factor
+of 1/y^(-H) = y^H. Therefore the squared differences will be proportional
+to y^(2H).
+A.-L. Barabási and T. Vicsek extended this notion to an infinite hierarchy
+of exponents H_q for the qth-order correlation function with
+
+c_q(d) = <(h(x) - h(x + d))^q>_x ~ d^(q H_q)
+
+With q = 1 you get a value H_1 that is closely related to the normal Hurst
+exponent, but with different q you either get a constant value H_q = H_0
+independent of q, which indicates that the function has no multifractal
+properties, or different H_q, which is a sign for multifractal behavior.
+T. Di Matteo, T. Aste and M. M. Dacorogna applied this technique to
+financial data series and gave it the name “Generalized Hurst Exponent”.
+
+- Explanation of the Algorithm:
Curiously, I could not find any algorithmic description how to calculate
+H_q in the literature. Researchers seem to just imply that you can obtain
+the exponent by a line fitting algorithm in a log-log plot, but they do not
+talk about the actual procedure or the required parameters.
+Essentially, we can calculate c_q(d) of a discrete evenly sampled time
+series Y = [y_0, y_1, y_2, … y_(N-1)] by taking the absolute differences
+[|y_0 - y_d|, |y_1 - y_(d+1)|, … , |y_(N-d-1) - y_(N-1)|] raising them to
+the qth power and taking the mean.
+Now we take the logarithm on both sides of our relation c_q(d) ~ d^(q H_q)
+and get
+log(c_q(d)) ~ log(d) * q H_q
+So in other words if we plot log(c_q(d)) against log(d) for several d we
+should get a straight line with slope q H_q. This enables us to use a
+linear least squares algorithm to obtain H_q.
+Note that we consider x as a discrete variable in the range 0 <= x < N.
+We can do this, because the actual sampling rate of our data series does
+not alter the result. After taking the logarithm any scaling factor delta_x
+would only result in an additive term since
+log(delta_x * x) = log(x) + log(delta_x) and we only care about the slope
+of the line and not the intercept.
+
+- References:
-
+
+[mh_1]
+
A.-L. Barabási and T. Vicsek, “Multifractality of self-affine
+fractals,” Physical Review A, vol. 44, no. 4, pp. 2730–2733, 1991.
-
-Helper functions¶
-
--
-
nolds.
binary_n
(total_N, min_n=50)[source]¶
+
+- Args:
+- data (array-like of float):
time series of data points (should be evenly sampled)
+
+
+
+- Kwargs:
+- qvals (iterable of float or int):
values of q for which H_q should be calculated (default: [1])
+
+- dists (iterable of int):
distances for which the height-height correlation should be calculated
+(determines the x-coordinates in the log-log plot)
+default: logarithmic_n(1, max(20, 0.02 * len(data)), 1.5) to ensure
+even spacing on the logarithmic axis
+
+- fit (str):
the fitting method to use for the line fit, either ‘poly’ for normal
+least squares polynomial fitting or ‘RANSAC’ for RANSAC-fitting which
+is more robust to outliers
+
+- debug_plot (boolean):
if True, a simple plot of the final line-fitting step will be shown
+
+- debug_data (boolean):
if True, debugging data will be returned alongside the result
+
+- plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
+under the given file name instead of directly showing it through
+plt.show()
+
+
+
+- Returns:
+- array of float:
list of H_q for every q given in qvals
+
+- (1d-vector, 2d-vector, 2d-vector):
only present if debug_data is True: debug data of the form
+(xvals, yvals, poly)
where xvals
is the logarithm of dists
,
+yvals
are the logarithms of the corresponding height-height-
+correlations for each distance (first dimension) and each q
+(second dimension) in the shape len(dists) x len(qvals) and poly
are
+the line coefficients ([slope, intercept]
) for each q in the shape
+len(qvals) x 2.
+
+
+
+
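A monofractal sanity-check sketch (input and qvals are illustrative): for ordinary Brownian motion all H_q should come out roughly equal, matching the case without multifractal properties described above:

import numpy as np
import nolds

rng = np.random.default_rng(2)
data = np.cumsum(rng.normal(size=10000))  # ordinary Brownian motion
print(nolds.mfhurst_b(data, qvals=[1, 2, 3]))  # roughly constant across q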
+
+
+
+
+Generalized Hurst Exponent (Di Matteo et al.)¶
+
+-
+nolds.mfhurst_dm(data, qvals=None, max_dists=range(5, 20), detrend=True, fit='poly', debug_plot=False, debug_data=False, plot_file=None)[source]¶
+Calculates the Generalized Hurst Exponent H_q for different q according to
+the MATLAB code of Tomaso Aste - one of the authors that introduced this
+measure.
+
+- Explanation of the General Hurst Exponent:
See mfhurst_b.
+
+
+Warning: I do not recommend using this function unless you want to reproduce
+examples from Di Matteo et al. From my experiments and a critical code
+analysis it seems that mfhurst_b should provide more robust results.
+The design choices that make mfhurst_dm different from mfhurst_b are the
+following:
+
+
+- By default, a linear trend is removed from the data. This can be sensible
in some application areas (such as stock market analysis), but I think
+this should be an additional preprocessing step and not part of this
+algorithm.
+
+
+
+
+- In the calculation of the height-height correlations, the differences
(h(x) - h(x + d) are not calculated for every possible x from 0 to N-d-1,
+but instead d is used as a step size for x. I see no justification for
+this choice. It makes the algorithm run faster, but it also takes away
+a lot of statistical robustness, especially for large values of d.
+This effect can be clearly seen when setting debug_plot to True.
+
+
+
+
+- The algorithm uses a linear scale for the distance values d = 1, 2, 3,
…, tau_max. This is counter intuitive, since we later plot log(d)
+against log(c_q(d)). A linear scale will have a bias towards larger
+values in the logarithmic scale. A logarithmic scale for d seems to be
+a more natural fit. If low values of d yield statistically unstable
+results, they should simply be omitted.
+
+
+
+
+- The algorithm tests multiple values for tau_max, which is the maximum
distance that will be calculated. In [mhd_1] the authors state that this
+is done to test the robustness of the approach. However, taking the
+mean of several runs with different tau_max will not produce any more
+information than performing one run with the largest tau_max. Instead
+it will only introduce a bias towards low values for d.
+
+
+
+
+
+- References:
-
+
+[mhd_1]
+
T. Di Matteo, T. Aste, and M. M. Dacorogna, “Scaling behaviors
+in differently developed markets,” Physica A: Statistical Mechanics
+and its Applications, vol. 324, no. 1–2, pp. 183–188, 2003.
+
+
+
+- Reference code:
-
+
+[mhd_a]
+
Tomaso Aste, “Generalized Hurst exponent”,
+url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent
+
+
+
+- Args:
+- data (1d-vector of float):
input data (should be evenly sampled)
+
+- qvals (1d-vector of float)
values of q for which H_q should be calculated (default: [1])
+
+
+
+- Kwargs:
+- max_dists (1d-vector of int):
different values to test for tau_max, the maximum value for the distance
+d. The resulting H_q will be a mean of all H_q calculated with tau_max
+= max_dists[0], max_dists[1], … .
+
+- detrend (boolean):
if True, a linear trend will be removed from the data before H_q will
+be calculated
+
+- fit (str):
the fitting method to use for the line fit, either ‘poly’ for normal
+least squares polynomial fitting or ‘RANSAC’ for RANSAC-fitting which
+is more robust to outliers
+
+- debug_plot (boolean):
if True, a simple plot of the final line-fitting step will be shown
+
+- debug_data (boolean):
if True, debugging data will be returned alongside the result
+
+- plot_file (str):
if debug_plot is True and plot_file is not None, the plot will be saved
+under the given file name instead of directly showing it through
+plt.show()

- Returns:
  - array of float:
    array of mH_q for every q given in qvals, where mH_q is the mean of
    all H_q calculated for different max distances in max_dists.
  - array of float:
    array of standard deviations sH_q for each mH_q returned
  - (1d-vector, 2d-vector, 2d-vector):
    only present if debug_data is True: debug data of the form
    (xvals, yvals, poly) where xvals is the logarithm of dists,
    yvals are the logarithms of the corresponding height-height-
    correlations for each distance (first dimension) and each q
    (second dimension) in the shape len(dists) x len(qvals), and poly
    are the line coefficients ([slope, intercept]) for each q in the shape
    len(qvals) x 2.
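
For illustration, here is a minimal usage sketch that contrasts mfhurst_dm
with mfhurst_b on the same input (the data source and q values below are
arbitrary choices for this example, not recommendations):

import nolds

# fractional gaussian noise with a known Hurst exponent of 0.75
data = nolds.fgn(10000, H=0.75)

# mean and standard deviation of H_q over the tested values of tau_max
mhq, shq = nolds.mfhurst_dm(data, qvals=[1, 2, 3])

# mfhurst_b yields one H_q per q and should give more robust results
hq = nolds.mfhurst_b(data, qvals=[1, 2, 3])
print(mhq, shq, hq)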

Helper functions¶

nolds.binary_n(total_N, min_n=50)[source]¶
Creates a list of values by successively halving the total length total_N
until the resulting value is less than min_n.
Non-integer results are rounded down.

- Args:
  - total_N (int):
    total length

- Kwargs:
  - min_n (int):
    minimal length after division

- Returns:
  - list of integers:
    total_N/2, total_N/4, total_N/8, … until total_N/2^i < min_n
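
A quick sketch of the behavior described above:

import nolds

ns = nolds.binary_n(1000, min_n=50)
# successive halving: 500, 250, 125, 62 (the next value, 31, is < 50)
print(ns)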

nolds.logarithmic_n(min_n, max_n, factor)[source]¶
Creates a list of values by successively multiplying a minimum value min_n by
a factor > 1 until a maximum value max_n is reached.
Non-integer results are rounded down.

- Args:
  - min_n (float):
    minimum value (must be < max_n)
  - max_n (float):
    maximum value (must be > min_n)
  - factor (float):
    factor used to increase min_n (must be > 1)

- Returns:
  - list of integers:
    min_n, min_n * factor, min_n * factor^2, … min_n * factor^i < max_n
    without duplicates
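
A short sketch of the resulting spacing:

import nolds

nvals = nolds.logarithmic_n(10, 100, 1.5)
# 10, 15, 22, 33, 50, 75 - min_n * factor^i, rounded down, duplicates removed
print(nvals)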

nolds.logarithmic_r(min_n, max_n, factor)[source]¶
Creates a list of values by successively multiplying a minimum value min_n by
a factor > 1 until a maximum value max_n is reached.

- Args:
  - min_n (float):
    minimum value (must be < max_n)
  - max_n (float):
    maximum value (must be > min_n)
  - factor (float):
    factor used to increase min_n (must be > 1)

- Returns:
  - list of floats:
    min_n, min_n * factor, min_n * factor^2, … min_n * factor^i < max_n
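
In contrast to logarithmic_n, the values are not rounded, which makes this
function suitable for choosing distance thresholds, e.g. as the rvals of
corr_dim. A short sketch:

import nolds

rvals = nolds.logarithmic_r(0.1, 1.0, 1.3)
# 0.1, 0.13, 0.169, ... (floats, no rounding)
print(rvals)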

nolds.expected_h(nvals, fit='RANSAC')[source]¶
Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.

- Args:
  - nvals (iterable of int):
    the values of n used to calculate the individual (R/S)_n

- Kwargs:
  - fit (str):
    the fitting method to use for the line fit, either ‘poly’ for normal
    least squares polynomial fitting or ‘RANSAC’ for RANSAC-fitting which
    is more robust to outliers

- Returns:
  - float:
    expected h for white noise

nolds.expected_rs(n)[source]¶
Calculates the expected (R/S)_n for white noise for a given n.
This is used as a correction factor in the function hurst_rs. It uses the
formula of Anis-Lloyd-Peters (see [h_3]).

- Args:
  - n (int):
    the value of n for which the expected (R/S)_n should be calculated

- Returns:
  - float:
    expected (R/S)_n for white noise
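
A minimal sketch showing how these two functions fit together (the nvals
are an arbitrary example):

import nolds

nvals = [8, 16, 32, 64, 128]
# expected rescaled range of white noise for each subsequence length n
ers = [nolds.expected_rs(n) for n in nvals]
# slope of log(E[(R/S)_n]) over log(n): the expected Hurst exponent of
# white noise for these nvals (noticeably above 0.5 for such small n)
print(nolds.expected_h(nvals, fit='poly'))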

nolds.logmid_n(max_n, ratio=0.25, nsteps=15)[source]¶
Creates an array of integers that lie evenly spaced in the “middle” of the
logarithmic scale from 0 to log(max_n).
If max_n is very small and/or nsteps is very large, this may lead to
duplicate values, which will be removed from the output.

- Args:
  - max_n (int):
    largest possible output value (should be the sequence length when used in
    hurst_rs)

- Kwargs:
  - ratio (float):
    width of the “middle” of the logarithmic interval relative to log(max_n).
    For example, for ratio=1/2.0 the logarithm of the resulting values will
    lie between 0.25 * log(max_n) and 0.75 * log(max_n).
  - nsteps (float):
    (maximum) number of values to take from the specified range

- Returns:
  - array of int:
    a logarithmically spaced sequence of at most nsteps values (may be less,
    because only unique values are returned)
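
A small sketch of the intended use together with hurst_rs:

import nolds

# at most 15 values from the middle of the logarithmic scale up to the
# data length
nvals = nolds.logmid_n(10000, ratio=0.25, nsteps=15)
h = nolds.hurst_rs(nolds.fgn(10000, H=0.75), nvals=nvals)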

nolds.lyap_r_len(**kwargs)[source]¶
Helper function that calculates the minimum number of data points required
to use lyap_r.
Note that none of the required parameters may be set to None.

- Kwargs:
  - kwargs (dict):
    arguments used for lyap_r (required: emb_dim, lag, trajectory_len and
    min_tsep)

- Returns:
  - minimum number of data points required to call lyap_r with the given
    parameters

nolds.lyap_e_len(**kwargs)[source]¶
Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.

- Kwargs:
  - kwargs (dict):
    arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
    and min_tsep)

- Returns:
  - minimum number of data points required to call lyap_e with the given
    parameters
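
These helpers allow checking in advance whether a time series is long
enough; a sketch with arbitrary example parameters:

import nolds

params = dict(emb_dim=10, lag=1, trajectory_len=20, min_tsep=10)
data = nolds.fgn(5000)
if len(data) >= nolds.lyap_r_len(**params):
    le = nolds.lyap_r(data, **params)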

Datasets¶

Benchmark dataset for hurst exponent¶

nolds.brown72 = array([45.47422, 42.55601, 46.5188 , ..., 42.78297, 44.34307, 40.70655])¶

ndarray(shape, dtype=float, buffer=None, offset=0, strides=None, order=None)

An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using array, zeros or empty (refer
to the See Also section below). The parameters given here refer to
a low-level method (ndarray(…)) for instantiating an array.
For more information, refer to the numpy module and examine the
methods and attributes of an array.

Parameters¶
(for the __new__ method; see Notes below)

- shape : tuple of ints
  Shape of created array.
- dtype : data-type, optional
  Any object that can be interpreted as a numpy data type.
- buffer : object exposing buffer interface, optional
  Used to fill the array with data.
- offset : int, optional
  Offset of array data in buffer.
- strides : tuple of ints, optional
  Strides of data in memory.
- order : {‘C’, ‘F’}, optional
  Row-major (C-style) or column-major (Fortran-style) order.

Attributes¶

- T : ndarray
  Transpose of the array.
- data : buffer
  The array’s elements, in memory.
- dtype : dtype object
  Describes the format of the elements in the array.
- flags : dict
  Dictionary containing information related to memory use, e.g.,
  ‘C_CONTIGUOUS’, ‘OWNDATA’, ‘WRITEABLE’, etc.
- flat : numpy.flatiter object
  Flattened version of the array as an iterator. The iterator
  allows assignments, e.g., x.flat = 3 (See ndarray.flat for
  assignment examples; TODO).
- imag : ndarray
  Imaginary part of the array.
- real : ndarray
  Real part of the array.
- size : int
  Number of elements in the array.
- itemsize : int
  The memory use of each array element in bytes.
- nbytes : int
  The total number of bytes required to store the array data,
  i.e., itemsize * size.
- ndim : int
  The array’s number of dimensions.
- shape : tuple of ints
  Shape of the array.
- strides : tuple of ints
  The step-size required to move from one element to the next in
  memory. For example, a contiguous (3, 4) array of type int16 in
  C-order has strides (8, 2). This implies that to move from element
  to element in memory requires jumps of 2 bytes. To move from
  row-to-row, one needs to jump 8 bytes at a time (2 * 4).
- ctypes : ctypes object
  Class containing properties of the array needed for interaction
  with ctypes.
- base : ndarray
  If the array is a view into another array, that array is its base
  (unless that array is also a view). The base array is where the
  array data is actually stored.

See Also¶
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
    it contains “garbage”).
dtype : Create a data-type.
numpy.typing.NDArray : An ndarray alias generic w.r.t. its
    dtype.type <numpy.dtype.type>.

Notes¶
There are two modes of creating an array using __new__:

1. If buffer is None, then only shape, dtype, and order are used.
2. If buffer is an object exposing the buffer interface, then all
   keywords are interpreted.

No __init__ method is needed because the array is fully initialized
after the __new__ method.

Examples¶
These examples illustrate the low-level ndarray constructor. Refer
to the See Also section above for easier ways of constructing an
ndarray.
First mode, buffer is None:

>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[0.0e+000, 0.0e+000], # random
       [ nan, 2.5e-323]])

Second mode:

>>> np.ndarray((2,), buffer=np.array([1,2,3]),
...            offset=np.int_().itemsize,
...            dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])

The brown72 dataset has a prescribed (uncorrected) Hurst exponent of 0.7270.
It is a synthetic dataset from the book “Chaos and Order in the Capital
Markets” [b7_a].
It is included here because the dataset can be found online [b7_b] and is
used by other software packages such as the R-package pracma [b7_c].
As such it can be used to compare different implementations.
However, it should be noted that the idea that the “true” Hurst exponent of
this series is indeed 0.7270 is problematic for several reasons:

- This value does not take into account the Anis-Lloyd-Peters correction
  factor.
- It was obtained using the biased version of the standard deviation.
- It depends (as always for the Hurst exponent) on the choice of the length
  of the subsequences.

If you want to reproduce the prescribed value, you can use the following
code:

nolds.hurst_rs(
    nolds.brown72,
    nvals=2**np.arange(3, 11),
    fit="poly", corrected=False, unbiased=False
)

References:
[b7_a] Edgar Peters, “Chaos and Order in the Capital Markets: A New
    View of Cycles, Prices, and Market Volatility”, Wiley: Hoboken,
    2nd Edition, 1996.
[b7_b] Ian L. Kaplan, “Estimating the Hurst Exponent”,
    url: http://bearcave.com/misl/misl_tech/wavelets/hurst/index.html
[b7_c] HwB, “Pracma: brown72”,
    url: https://www.rdocumentation.org/packages/pracma/versions/1.9.9/topics/brown72

Tent map¶

nolds.tent_map(x, steps, mu=2)[source]¶
Generates a time series of the tent map.

- Characteristics and Background:
  The name of the tent map is derived from the fact that the plot of x_i vs
  x_i+1 looks like a tent. For mu > 1 one application of the mapping function
  can be viewed as stretching the surface on which the value is located and
  then folding the area that is greater than one back towards the zero. This
  corresponds nicely to the definition of chaos as expansion in one dimension
  which is counteracted by a compression in another dimension.

- Calculating the Lyapunov exponent:
  The Lyapunov exponent of the tent map can be calculated easily: due to
  this stretching behavior a small difference delta between two neighboring
  points will indeed grow exponentially by a factor of mu in each iteration.
  We thus can assume that:

  delta_n = delta_0 * mu^n

  Comparing this with the definition of the Lyapunov exponent,
  delta_n = delta_0 * e^(lambda * n), we obtain:

  lambda = ln(mu)

- References:
  [tm_1] https://en.wikipedia.org/wiki/Tent_map

- Args:
  - x (float):
    starting point
  - steps (int):
    number of steps for which the generator should run

- Kwargs:
  - mu (int):
    parameter mu that controls the behavior of the map

- Returns:
  - generator object:
    the generator that creates the time series
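
The relation lambda = ln(mu) can be checked against the estimate of lyap_r;
a rough sketch (the parameters for lyap_r are ad-hoc choices, so the
estimate will only be approximate):

import math
import numpy as np
import nolds

mu = 1.8
data = np.fromiter(nolds.tent_map(0.1, 1000, mu=mu), dtype='float32')
le = nolds.lyap_r(data, emb_dim=2, lag=1, min_tsep=10)
print(le, math.log(mu))  # both should be in the same ballpark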

Logistic map¶

nolds.logistic_map(x, steps, r=4)[source]¶
Generates a time series of the logistic map.

- Characteristics and Background:
  The logistic map is among the simplest examples for a time series that can
  exhibit chaotic behavior depending on the parameter r. For r between 2 and
  3, the series quickly becomes static. At r=3 the first bifurcation point is
  reached after which the series starts to oscillate. Beginning with r = 3.6
  it shows chaotic behavior with a few islands of stability until perfect
  chaos is achieved at r = 4.

- Calculating the Lyapunov exponent:
  To calculate the “true” Lyapunov exponent of the logistic map, we first
  have to make a few observations for maps in general that are repeated
  applications of a function to a starting value.

  If we have two starting values that differ by some infinitesimal
  \(\delta_0\) then according to the definition of the Lyapunov exponent
  we will have an exponential divergence:

  \[|\delta_n| = |\delta_0| e^{\lambda n}\]

  We can now write that:

  \[e^{\lambda n} = \lim_{\delta_0 \to 0} \left|\frac{\delta_n}{\delta_0}\right|\]

  This is the definition of the derivative \(\frac{dx_n}{dx_0}\) of a
  point \(x_n\) in the time series with respect to the starting point
  \(x_0\) (or rather the absolute value of that derivative). Now we can
  use the fact that due to the definition of our map as repetitive
  application of some f, the chain rule gives:

  \[f^{n\prime}(x_0) = \frac{d}{dx_0} f(f(\ldots f(x_0) \ldots))
  = f'(x_{n-1}) \cdot f'(x_{n-2}) \cdot \ldots \cdot f'(x_0)\]

  with

  \[e^{\lambda n} = |f^{n\prime}(x_0)|\]

  we now have

  \[\begin{split}e^{\lambda n} &= |f'(x_{n-1}) \cdot f'(x_{n-2}) \cdot \ldots \cdot f'(x_0)| \\
  \Leftrightarrow \\
  \lambda n &= \ln |f'(x_{n-1}) \cdot f'(x_{n-2}) \cdot \ldots \cdot f'(x_0)| \\
  \Leftrightarrow \\
  \lambda &= \frac{1}{n} \sum_{k=0}^{n-1} \ln |f'(x_k)|\end{split}\]

  For the logistic map we simply have to calculate \(f'(x)\) and as we
  have

  \[f(x) = r x (1-x) = rx - rx^2\]

  we now get

  \[f'(x) = r - 2 rx\]

- References:
  [lm_1] https://en.wikipedia.org/wiki/Logistic_map
  [lm_2] https://blog.abhranil.net/2015/05/15/lyapunov-exponent-of-the-logistic-map-mathematica-code/

- Args:
  - x (float):
    starting point
  - steps (int):
    number of steps for which the generator should run

- Kwargs:
  - r (int):
    parameter r that controls the behavior of the map

- Returns:
  - generator object:
    the generator that creates the time series
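
The final sum formula translates directly into code. A minimal sketch that
evaluates \(\lambda = \frac{1}{n} \sum_k \ln |r - 2 r x_k|\) along an orbit
and compares it to ln(2), the known exact value for r = 4:

import numpy as np
import nolds

r = 4
x = np.fromiter(nolds.logistic_map(0.1, 10000, r=r), dtype='float64')
lyap = np.mean(np.log(np.abs(r - 2 * r * x)))
print(lyap, np.log(2))  # both approximately 0.693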

Fractional brownian motion¶

nolds.fbm(n, H=0.75)[source]¶
Generates fractional brownian motions of desired length.

- Author:
  Christian Thomae

- References:
  [fbm_1] https://en.wikipedia.org/wiki/Fractional_Brownian_motion#Method_1_of_simulation

- Args:
  - n (int):
    length of sequence to generate

- Kwargs:
  - H (float):
    hurst parameter

- Returns:
  - array of float:
    simulated fractional brownian motion

Fractional gaussian noise¶

nolds.fgn(n, H=0.75)[source]¶
Generates fractional gaussian noise of desired length.

- References:
  [fgn_1] https://en.wikipedia.org/wiki/Fractional_Brownian_motion

- Args:
  - n (int):
    length of sequence to generate

- Kwargs:
  - H (float):
    hurst parameter

- Returns:
  - array of float:
    simulated fractional gaussian noise
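
Since fractional gaussian noise is the sequence of increments of fractional
brownian motion (fgn(n, H) is np.diff(fbm(n+1, H))), the Hurst parameter can
be recovered from the noise; a quick sanity-check sketch:

import numpy as np
import nolds

noise = nolds.fgn(10000, H=0.75)
walk = np.cumsum(noise)  # integrating the noise gives an fbm-like path again
print(nolds.hurst_rs(noise))  # should lie roughly around 0.75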

Quantum random numbers¶

nolds.qrandom(n)[source]¶
Creates an array of n true random numbers obtained from the quantum random
number generator at qrng.anu.edu.au.
This function requires the package quantumrandom and an internet connection.
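
For experiments without an internet connection, the stored sample from
load_qrandom can serve as a drop-in; a hedged sketch of that pattern:

import nolds

try:
    data = nolds.qrandom(1000)  # requires quantumrandom and internet access
except Exception:
    data = nolds.load_qrandom()[:1000]  # stored set of 10000 such numbers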

Financial example datasets¶

nolds.load_financial()[source]¶
Loads the following datasets from CSV files in this package:

- jkse: Jakarta Composite Index, downloaded on 2019-02-12 from https://finance.yahoo.com/quote/%5EJKSE/history?period1=631148400&period2=988668000&interval=1d&filter=history&frequency=1d
- n225: Nikkei 225, downloaded on 2019-02-12 from https://finance.yahoo.com/quote/%5EN225/history?period1=631148400&period2=988668000&interval=1d&filter=history&frequency=1d
- ndx: NASDAQ 100, downloaded on 2019-02-12 from https://finance.yahoo.com/quote/%5ENDX/history?period1=631148400&period2=988668000&interval=1d&filter=history&frequency=1d

All datasets contain daily prices from the period from 1990-01-01 to
2001-05-01. Missing values are NaN, except for opening values, which are
treated as follows:

- If the first opening value is missing, the first existing opening value
  is used for the first day.
- All other missing opening values are filled by the close value of the
  last day where data was available.

- Returns:
  - list of tuple(1d-array, 2d-array):
    datasets with days as array of date objects and 2d-array with the columns
    “Open”, “High”, “Low”, “Close”, “Adj Close”, and “Volume”. Note that
    “Open” values have been padded to ensure that there are no NaNs left.
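
A short usage sketch, assuming the documented return structure of one
(dates, values) tuple per dataset:

import nolds

jkse, n225, ndx = nolds.load_financial()
days, values = jkse
closing = values[:, 3]  # column order: Open, High, Low, Close, Adj Close, Volume
print(days[0], closing[0])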

Fractal data used by Barabasi et al. (1991)¶

nolds.barabasi1991_fractal(size, iterations, b1=0.8, b2=0.5)[source]¶
Generates the simple fractal described in [bf].
The fractal divides a rectangular segment starting at (x0, y0) with width w
and height h along the x-axis into four line segments of equal size with the
boundary points [x0, x1, x2, x3, x4]. It has two parameters b1 and b2 that
allow choosing the values of y(x1) and y(x3), while it always holds that
y(x0) = y0, y(x2) = y0 and y(x4) = y0 + h.
The process starts with a single line segment of height 1 spanning the whole
data range. In each iteration, the rectangles spanning the line segments
from the previous iteration are subdivided according to the same rule.

- References:
  [bf] A.-L. Barabási and T. Vicsek, “Multifractality of self-affine
      fractals,” Physical Review A, vol. 44, no. 4, pp. 2730–2733, 1991.

- Args:
  - size (int):
    number of data points in the resulting array
  - iterations (int):
    number of iterations to perform

- Kwargs:
  - b1 (float):
    relative height at x1 (between 0 and 1)
  - b2 (float):
    relative height at x3 (between 0 and 1)

- Returns:
  - (1d-array of float):
    generated fractal
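
This fractal serves as a test case for the generalized Hurst exponent
functions in this module; a minimal sketch of that use (the parameter
values below are arbitrary):

import nolds

data = nolds.barabasi1991_fractal(1000, 5)
# generalized Hurst exponents for a few values of q
hq = nolds.mfhurst_b(data, qvals=[1, 2, 3])
print(hq)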

Nolds Unittests¶
Nolds includes a set of unittests that can be run with
python -m unittest nolds.test_measures.
Some of these tests are based on random numbers and can therefore fail in rare
cases.
Please note that running all tests may take a few minutes.