diff --git a/build/lib/seg1d/__init__.py b/build/lib/seg1d/__init__.py deleted file mode 100644 index 722c4a7..0000000 --- a/build/lib/seg1d/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -""" seg1d: Python module for automated 1D subsequence segmentation -Copyright (C) 2020 Mathew Schwartz -""" - -from . _about import __version__ -import os - -from . segment import Segmenter, segment_data - -__all__ = ['Segmenter', 'segment_data', 'sampleData'] - - -def get_data_dir(): - """Returns the directory of the package. - """ - return os.path.join(os.path.dirname(__file__), 'examples', 'data') - - -def sampleData(c=0.8): - """ Helper function for accessing sample data. - - Parameters - ---------- - - c : float, optional - the minimum correlation weights to load from the sample dataset - - """ - - import numpy as np - - data_dir = get_data_dir() - - refWeights = np.load(os.path.join(data_dir, 'w.npy'), allow_pickle=True)[()] - refData = np.load(os.path.join(data_dir, 'r.npy'), allow_pickle=True) - targData = np.load(os.path.join(data_dir, 't.npy'), allow_pickle=True)[()] - - # define which weights to use - refWeights = {x: y for x, y in refWeights.items() if y > c} - - return refData, targData, refWeights diff --git a/build/lib/seg1d/_about.py b/build/lib/seg1d/_about.py deleted file mode 100644 index caa8677..0000000 --- a/build/lib/seg1d/_about.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '0.0.16' \ No newline at end of file diff --git a/build/lib/seg1d/_version.py b/build/lib/seg1d/_version.py deleted file mode 100644 index 1aa4adf..0000000 --- a/build/lib/seg1d/_version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '0.0.13' \ No newline at end of file diff --git a/build/lib/seg1d/algorithm.py b/build/lib/seg1d/algorithm.py deleted file mode 100644 index aaa5044..0000000 --- a/build/lib/seg1d/algorithm.py +++ /dev/null @@ -1,533 +0,0 @@ -''' -.. module:: algorithm - :platform: Unix, Windows - :synopsis: Algorithms to handle 1D subsequence segmentation of data. - -''' - -from operator import itemgetter -from copy import deepcopy -import warnings - -import numpy as np -from scipy.signal import find_peaks -from scipy.interpolate import interp1d -from sklearn.cluster import AgglomerativeClustering - -from . import optimized_funcs as optf - - -def rolling_corr(x, yData, winSize, cMax=False): - ''' Rolling Correlation - - Calculates the rolling correlation coefficient over the given window sizes - - Parameters - ---------- - x : 1-D array - array of target data - - yData : 2-D array - array of reference data - - winSize : int - scale of the that the reference data should be rescaled to - - Other Parameters - ---------------- - cMax : bool, optional - Use maximum of correlations (Default False) - - Returns - ------- - ndarray - 1-D array of length (size(x) - winSize + 1) - - Warnings - -------- - | The reference data (yData) must be smaller than the target data (x) - AFTER resampling. - | This means if the reference data is length 80, and the target data is - length 100, it will work. However, if the winSize is supposed to be length - 120, the reference will be scaled and correlation will crash. 
- - - See Also - -------- - - combine_corr : (takes the return of this function) - - - Examples - -------- - - >>> import numpy as np - >>> import seg1d.algorithm as alg - - >>> #make waves - >>> x = np.sin( np.linspace(-np.pi*1, np.pi*1, 20) ) - >>> y = np.sin( np.linspace(-np.pi*2, np.pi*2, 80) ).reshape(4,20) - - >>> #apply rolling correlations with 10 and 15 - >>> alg.rolling_corr(x, y, 10 ) - array([-0.00766151, 0.02078156, 0.03501678, 0.04019572, 0.04211895, - 0.04262637, 0.04211895, 0.04019572, 0.03501678, 0.02078156, - -0.00766151]) - >>> alg.rolling_corr(x, y, 15 ) - array([0.03321832, 0.03972237, 0.04254858, 0.04254858, 0.03972237, - 0.03321832]) - - ''' - - refScaled = resample(yData, winSize) # resample reference data - - assert x.size >= refScaled.shape[1], "Cannot correlate when reference larger than target " - - # get the rolling correlation between reference(s) and target - rCorr = optf.rcor(x, refScaled) - - # stack the correlations - corrs = np.vstack(rCorr) - - # return the mean or max of the correlations of the references - if cMax: return np.max(corrs, axis=0) - else: return np.mean(corrs, axis=0) - - -def combine_corr(x, w, method='m', scale=True): - ''' Combines Weighted Correlation - - Takes in the correlated data results and multiply the weighting values - to each array of data for that feature. - | Combines the results of the weighted features - - - Parameters - ---------- - x : Dict[int,Dict[string,numpy.array]] - ``{scale:{ feature: array([correlations]) } }`` - - w : Dict[string,float] - ``{ feature: weight }`` - - method : {'m','w', 's'} - keyword to use for aggregating feature correlations (default `m`). - Options, m=mean, w=weighted mean, s=sum - - scale : bool, optional - keyword argument for scaling the correlated feature before applying - any of the aggregation methods - - Returns - ------- - - Dict[int,numpy.array] - ``{scale: array([weighted correlations]) }`` - - - See Also - -------- - - rolling_corr : (input for this function) - get_peaks : (takes the return of this function) - - - Examples - -------- - - >>> import random - >>> import numpy as np - >>> import seg1d.algorithm as alg - - >>> #make a convenience function to get a wave for sample data - >>> def s(f1, f2, f3): return np.sin( np.linspace(f1, f2, f3) ) - - >>> x = { - ... 10: {'a': s(-np.pi*0.8, 0, 10), 'b': s(0, np.pi*0.8, 10)}, - ... 20: {'a': s(-np.pi*0.7, 0, 10), 'b': s(0, np.pi*0.7, 10)} - ... 
} - - Assign some weights and find the averaged value - - >>> w = { 'a': 0.5, 'b': 0.9 } - >>> a = alg.combine_corr(x, w ) - >>> for k,v in a.items(): print(k,v) - 10 [-0.14694631 -0.07296588 0.00666771 0.0857847 0.15825538 0.21846498 - 0.26174865 0.28475292 0.2856955 0.26450336] - 20 [-0.20225425 -0.12293111 -0.03630481 0.0524783 0.13814375 0.21560229 - 0.2802522 0.32825274 0.35675226 0.36405765] - - Change the weight values and see the weighted scores change - - >>> w = { 'a': 0.9, 'b': 0.2 } - >>> a = alg.combine_corr(x, w ) - >>> for k,v in a.items(): print(k,v) - 10 [-0.26450336 -0.3270411 -0.36424081 -0.37322037 -0.35328408 -0.30597655 - -0.23496298 -0.14574528 -0.04523573 0.05877853] - 20 [-0.36405765 -0.39304054 -0.39867347 -0.38062179 -0.33995792 -0.27909765 - -0.20165658 -0.1122354 -0.01614647 0.0809017 ] - - ''' - - cDict = {} - - # iterate through window sizes of data - for win in x: - winData = x[win] - w_list = [] - featRes = [] - # iterate through features of data - for f in winData: - featData = winData[f] - # multiply weights from the table to scale features - if scale: featRes.append( w[f] * featData ) - else: featRes.append(featData) - w_list.append(w[f]) - - # stack numpy arrays and average correlations on each frame - sF = np.vstack(featRes) - if method == 'm': cDict[win] = np.mean(sF, axis=0) - if method == 'w': cDict[win] = np.average(sF, axis=0, weights=w_list) - if method == 's': cDict[win] = np.sum(sF, axis=0) - - return cDict - - -def get_peaks(x, minC=0.7, dst=None): - ''' Peak Detection - - Find the peaks of a data array with a minimum value of a peak - and an optional distance parameter. - - Relies on ``scipy.signal.find_peaks`` - - Parameters - ---------- - - x : Dict[int,List[float]] - ``{scale: [correlations] }`` - - - Other Parameters - ---------------- - - minC : float, optional - -1 to 1 - - dst : real, optional - int or float - - Returns - ------- - n x 3 array - sorted by highest to lowest correlation of form - ``[ scale, correlation , peak index ]`` - - - See Also - -------- - - combine_corr : (input for this function) - uniques : (takes the return of this function) - - - Examples - -------- - - >>> import numpy as np - >>> import seg1d.algorithm as alg - - >>> # convenience function for generating wave - >>> def s(f1, f2, f3): return np.sin( np.linspace(f1, f2, f3) ) - - Define some scales that have correlations - - >>> x = { 10: s(-np.pi*1, np.pi*1, 10), 20: s(-np.pi*2, np.pi*2, 10) } - - Query the peaks in the data - - >>> np.around(alg.get_peaks(x), decimals=7) - array([[10. , 0.9848078, 7. ], - [20. , 0.9848078, 1. ], - [20. , 0.8660254, 6. ]]) - - Define a minimum for the peak - - >>> np.around(alg.get_peaks(x,minC = 0.9), decimals=7) - array([[10. , 0.9848078, 7. ], - [20. , 0.9848078, 1. ]]) - - ''' - - # iterate through each scaled window time periods to find the peaks - peakArr = [] - for wSize in x: - row = x[wSize] - # only take peaks above a height and optional distance - peaks, _ = find_peaks(row, height=minC, distance=dst) - # make an array of correlation,window size, index - peakArr += [[wSize, row[y], y] for y in peaks] - - # sort by highest correlations - sortedPeaks = sorted(peakArr, key=itemgetter(1), reverse=True) - - return sortedPeaks - - -def uniques(sortedPeaks, srcLen): - ''' Unique Segment Identification - - | Find unique segment(s) in a sequence of correlation values. 
- | Guarantees segments are not overlapping - - Parameters - ---------- - - sortedPeaks : n x 3 array - n x 3 array sorted by highest to lowest correlation - of form ``[ scale (int), correlation(float) , peak index (int) ]`` - - srcLen : int - length of the target data, used to block out possible segments - - - Returns - ------- - - n x 3 array - ``[ start index, end index, correlation ]`` - None - if no segments are found - - - See Also - -------- - - get_peaks : (input for this function) - cluster : (takes in the return of this function) - - Examples - -------- - - >>> import numpy as np - >>> import seg1d.algorithm as alg - - >>> p = [ [10, 0.90, 7 ], - ... [10, 0.89, 8 ], - ... [20, 0.80, 20 ], - ... [25, 0.70, 40 ], - ... ] - - >>> el = 50 - - >>> alg.uniques(p,el) - [[7, 17, 0.9], [20, 40, 0.8], [40, 65, 0.7]] - - ''' - - # make an array to block out the defined segments so they don't overlap - segmentLoc = np.ones((srcLen)) - # empty array for segment groups to use in clustering - segGroups = [] - # go through the correlation list - for peak in sortedPeaks: - # in order of highest correlation, match it to a peak - # find which window the peak is in - wSize = peak[0] - corr = peak[1] - sPos = peak[2] - - # add the window size to start indexS - ePos = sPos+wSize - - # if segment does not overlap - newSeg = segmentLoc[sPos:ePos] - # remove that size from the segment array - if (0 == newSeg).any(): continue - - # store the start and end points of the segment, with the - # corresponding correlation - segGroups.append([sPos, ePos, corr]) - segmentLoc[sPos:ePos].fill(0) - - if len(segGroups) == 0: return None - - return segGroups - - -def cluster(segGroups, segAdder=0.5, nClust=2): - ''' Clustering - - Clusters segments based on correlation values - - Parameters - ---------- - segGroups : n x 3 array - ``[ [ start index, end index, correlation ] ]`` - - segAdder : float, optional - 0.0 to 1.0 or None - If not None, the value that is added to the cluster groups to force - a correlation cluster of the highest values - - - Other Parameters - ---------------- - - nClust : int, optional - number of clusters to group data in (Default 2) - - If ``nClust=0``, returns segGroups - - - Returns - ------- - - n x 3 array - ``[start segment, end segment, correlation score of segment]`` - - - Warns - ----- - - Segment Adder value was included in final cluster. - This may mean cluster is poorly defined or Adder is too high. - It is removed before being returned. However, it may be a sign of - poor clustering settings as the intention of the segment adder is to - force clustering of highly similar segments by creating a lower group - (therefore, it should not be in the high cluster group). - - See Also - -------- - - uniques : (input for this function) - - Examples - -------- - - >>> import numpy as np - >>> import seg1d.algorithm as alg - - >>> x = [[7, 17, 0.90], [20, 40, 0.88], [40, 65, 0.8], [50, 65, 0.70]] - >>> alg.cluster(x) - [[7, 17, 0.9], [20, 40, 0.88], [40, 65, 0.8], [50, 65, 0.7]] - >>> alg.cluster(x,segAdder=None) - [[7, 17, 0.9], [20, 40, 0.88], [40, 65, 0.8]] - >>> alg.cluster(x,segAdder=0.85) - [[7, 17, 0.9], [20, 40, 0.88], [40, 65, 0.8]] - - Note: This should raise the following warning: - - UserWarning: Segment Adder value was included in final cluster. - This may mean cluster is poorly defined or Adder is too high. 
- - >>> alg.cluster(x,nClust=3) - [[7, 17, 0.9], [20, 40, 0.88], [40, 65, 0.8]] - >>> alg.cluster(x,segAdder=None,nClust=3) - [[7, 17, 0.9], [20, 40, 0.88]] - - - ''' - - if nClust == 0: return segGroups - - segGroups = deepcopy(segGroups) - - # Add a correlation of the lower threshold to force a cluster - # of good data if necessary - if segAdder is not None: segGroups.append([-1, -1, segAdder]) - - # Check for incorrect segments by clustering - corrVals = [x[2] for x in segGroups] - - # define the x value series - x_grid = np.ones((len(corrVals),)) - x = list(zip(x_grid, corrVals)) - - # use clustering to find the most likely reference segments - cluster = AgglomerativeClustering(n_clusters=nClust, - affinity='euclidean', - linkage='single') - cluster.fit_predict(x) - - # retrieve only highest ranked - segClust = [] - # since correlations are sorted, first cluster label is the desired cluster - topClust = cluster.labels_[0] - for i, label in enumerate(cluster.labels_): - if label != topClust: continue - segClust.append(segGroups[i]) - - if [-1, -1, segAdder] in segClust: - warnings.warn('Segment Adder value was included in final cluster.' - 'This may mean cluster is poorly defined \ - or Adder is too high.', - stacklevel=2) - segClust.remove([-1, -1, segAdder]) - - return segClust - - -def resample(x, s): - ''' - Interpolation - - Apply a cubic interpolation on an n x m dataset that is resampled - to the number of samples - - Parameters - ---------- - x : n x m array - n-number of datasets with length m - - s : int - number of samples to interpolate x - - Returns - ------- - n x s array - interpolated dataset - - - See Also - -------- - - cluster : (input for this function) - resample : (takes in the return of this function) - - - Examples - -------- - - >>> import numpy as np - >>> import seg1d.algorithm as alg - - >>> x = np.sin( np.linspace(-3, 3, 10) ) - >>> alg.resample(x,6) - array([[-0.14112001, -0.97319156, -0.56423116, 0.56423116, 0.97319156, - 0.14112001]]) - >>> x = np.array([x,x**2]) - >>> alg.resample(x,6) - array([[-0.14112001, -0.97319156, -0.56423116, 0.56423116, 0.97319156, - 0.14112001], - [ 0.01991486, 0.94687756, 0.31972116, 0.31972116, 0.94687756, - 0.01991486]]) - - ''' - if x.ndim == 1: x = np.array([x]) - - def interSub(y): - f = interp1d(range(y.size), y, kind='cubic') - return f - - def getInter(y): - p = np.linspace(0, y.size-1, num=s, endpoint=True) - return interSub(y)(p) - - # make an empty numpty array to store reinterpolated data - resampled = np.empty((x.shape[0], s), dtype=np.float64) - # tore the interpolated data for each array - for i in range(0, len(x)): - resampled[i] = getInter(x[i]) - - return resampled diff --git a/build/lib/seg1d/examples/__init__.py b/build/lib/seg1d/examples/__init__.py deleted file mode 100644 index 2c82e15..0000000 --- a/build/lib/seg1d/examples/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from . 
import * - -__all__ = ['ex_simple', 'ex_gauss', 'ex_ecg', 'ex_sine', 'noise', - 'ex_sine_noise', 'ex_segmenter_features', 'ex_segmenter_sine'] diff --git a/build/lib/seg1d/examples/data/r.npy b/build/lib/seg1d/examples/data/r.npy deleted file mode 100644 index a78cea8..0000000 Binary files a/build/lib/seg1d/examples/data/r.npy and /dev/null differ diff --git a/build/lib/seg1d/examples/data/t.npy b/build/lib/seg1d/examples/data/t.npy deleted file mode 100644 index 9d03dfa..0000000 Binary files a/build/lib/seg1d/examples/data/t.npy and /dev/null differ diff --git a/build/lib/seg1d/examples/data/w.npy b/build/lib/seg1d/examples/data/w.npy deleted file mode 100644 index 58a7298..0000000 Binary files a/build/lib/seg1d/examples/data/w.npy and /dev/null differ diff --git a/build/lib/seg1d/examples/ex_ecg.py b/build/lib/seg1d/examples/ex_ecg.py deleted file mode 100644 index 5372792..0000000 --- a/build/lib/seg1d/examples/ex_ecg.py +++ /dev/null @@ -1,188 +0,0 @@ -''' -In this example we use the ECG data included with scipy signal module. -The references roughly includes the Q-T interval (https://en.wikipedia.org/wiki/Electrocardiography). -In the first portion, two sample segments are used. While the segments are not aligned, they are able to find some segments correctly. -In the second portion of the example, only one segment is used for the reference data. - -.. plot:: - :context: close-figs - - >>> import random - >>> import numpy as np - >>> from scipy.misc import electrocardiogram - >>> import matplotlib.pyplot as plt - >>> import seg1d - - After imports, the scipy signal ECG data is called and some segments are taken. - - >>> ecg = electrocardiogram() #get the scipy sample data - >>> ref_slices = [[927, 1057],[1111, 1229]] #pick sample endpoints - - >>> s = seg1d.Segmenter() #create the segmenter - - >>> refs = [ ecg[x[0]:x[1]] for x in ref_slices ] - >>> for r in refs: s.add_reference(r) #set reference data - - >>> s.set_target(ecg[1500:3500]) #set the target data to the ecg after ref - >>> segments = s.segment() # run segmenter with defaults - - >>> print(np.around(segments,decimals=7)) - [[1.607000e+03 1.729000e+03 8.169533e-01] - [7.380000e+02 8.220000e+02 8.123868e-01] - [9.190000e+02 1.003000e+03 8.120505e-01] - [1.439000e+03 1.552000e+03 8.092366e-01] - [3.600000e+02 4.930000e+02 8.077664e-01] - [1.091000e+03 1.213000e+03 8.043364e-01] - [1.775000e+03 1.895000e+03 7.998723e-01] - [1.720000e+02 3.000000e+02 7.926582e-01] - [1.268000e+03 1.340000e+03 7.847107e-01] - [5.540000e+02 6.280000e+02 7.802931e-01]] - - The reference data is automatically scaled to the largest reference in the dataset - when the ``segment`` method is called. Therefore, by retrieving this attribute - we can plot what the reference set looks like when the lengths are normalized. - - In the example, it is clear the peaks of the reference segments are not aligned. - This discrepency, due to the averaging of all reference data items, will be seen - in the final segments of the target data later. - - >>> refs = s.r - >>> refs = np.asarray( [ x[y] for x in refs for y in x ] ) - - >>> plt.figure(figsize=(5,3)) # doctest: +SKIP - >>> plt.plot(refs.T) # doctest: +SKIP - >>> plt.show() # doctest: +SKIP - -.. plot:: - :context: close-figs - - The final segments are shown by calling the property ``t_masked`` which returns the - target data as an ndarray with NaN values for areas not found to be segments. 
- - - >>> plt.figure(figsize=(15,3)) # doctest: +SKIP - >>> plt.plot(s.t_masked.T) # doctest: +SKIP - >>> plt.show() # doctest: +SKIP - -.. plot:: - :context: close-figs - - >>> #use only 1 reference - >>> s.clear_reference() - >>> s.add_reference( ecg[927:1057] ) - - >>> refs = s.r - >>> refs = np.asarray( [ x[y] for x in refs for y in x ] ) - - >>> plt.figure(figsize=(5,3)) # doctest: +SKIP - >>> plt.plot(refs.T) # doctest: +SKIP - >>> plt.show() # doctest: +SKIP - -.. plot:: - :context: close-figs - - >>> #remove first part of data (contains reference) - >>> s.set_target(ecg[1500:3500]) - >>> s.nC = 2 - >>> s.cMin = 0.7 - - >>> segments = s.segment() - - >>> print(np.around(segments,decimals=7)) - [[7.350000e+02 8.540000e+02 9.462850e-01] - [1.093000e+03 1.213000e+03 9.242974e-01] - [9.140000e+02 1.046000e+03 9.059727e-01] - [3.620000e+02 4.980000e+02 9.009127e-01] - [5.470000e+02 6.800000e+02 8.940106e-01] - [1.262000e+03 1.390000e+03 8.868629e-01] - [1.776000e+03 1.902000e+03 8.771139e-01] - [1.609000e+03 1.729000e+03 8.689476e-01] - [1.440000e+03 1.559000e+03 8.646669e-01] - [1.730000e+02 3.060000e+02 8.029426e-01]] - - >>> res = s.t_masked - - >>> plt.figure(figsize=(15,3)) # doctest: +SKIP - >>> plt.plot(res.T) # doctest: +SKIP - >>> plt.show() # doctest: +SKIP - - -''' - - -if __name__ == "__main__": - - import random - import numpy as np - from scipy.misc import electrocardiogram - import matplotlib.pyplot as plt - import seg1d - - # In this example we use the ECG data included with scipy signal module. - # The references roughly includes the Q-T interval (https://en.wikipedia.org/wiki/Electrocardiography). - # In the first portion, two sample segments are used. While the segments are not aligned, they are able to find some segments correctly. - # In the second portion of the example, only one segment is used for the reference data. 
- - - ecg = electrocardiogram() # get the scipy sample data - ref_slices = [[927, 1057],[1111, 1229]] # pick sample endpoints - - s = seg1d.Segmenter() # create the segmenter - - refs = [ ecg[x[0]:x[1]] for x in ref_slices ] - for r in refs: s.add_reference(r) # set reference data - - s.set_target(ecg[1500:3500]) # set the target data to the ecg after ref - segments = s.segment() # run segmenter with defaults - - print(np.around(segments, decimals=7)) - # [[1.607000e+03 1.729000e+03 8.169533e-01] - # [7.380000e+02 8.220000e+02 8.123868e-01] - # [9.190000e+02 1.003000e+03 8.120505e-01] - # [1.439000e+03 1.552000e+03 8.092366e-01] - # [3.600000e+02 4.930000e+02 8.077664e-01] - # [1.091000e+03 1.213000e+03 8.043364e-01] - # [1.775000e+03 1.895000e+03 7.998723e-01] - # [1.720000e+02 3.000000e+02 7.926582e-01] - # [1.268000e+03 1.340000e+03 7.847107e-01] - # [5.540000e+02 6.280000e+02 7.802931e-01]] - - refs = s.r - refs = np.asarray( [ x[y] for x in refs for y in x ] ) - - plt.figure(figsize=(5, 3)) - plt.plot(refs.T) - plt.show() - - plt.figure(figsize=(15, 3)) - plt.plot(s.t_masked.T) - plt.show() - - # use only 1 reference - s.clear_reference() - s.add_reference( ecg[927:1057] ) - # remove first part of data (contains reference) - s.set_target(ecg[1500:3500]) - s.nC = 2 - s.cMin = 0.7 - - segments = s.segment() - - print(np.around(segments, decimals=7)) - # [[7.350000e+02 8.540000e+02 9.462850e-01] - # [1.093000e+03 1.213000e+03 9.242974e-01] - # [9.140000e+02 1.046000e+03 9.059727e-01] - # [3.620000e+02 4.980000e+02 9.009127e-01] - # [5.470000e+02 6.800000e+02 8.940106e-01] - # [1.262000e+03 1.390000e+03 8.868629e-01] - # [1.776000e+03 1.902000e+03 8.771139e-01] - # [1.609000e+03 1.729000e+03 8.689476e-01] - # [1.440000e+03 1.559000e+03 8.646669e-01] - # [1.730000e+02 3.060000e+02 8.029426e-01]] - - res = s.t_masked - - plt.figure(figsize=(15, 3)) - plt.plot(res.T) - plt.show() - diff --git a/build/lib/seg1d/examples/ex_gauss.py b/build/lib/seg1d/examples/ex_gauss.py deleted file mode 100644 index b3f6f80..0000000 --- a/build/lib/seg1d/examples/ex_gauss.py +++ /dev/null @@ -1,94 +0,0 @@ -''' - -.. 
plot:: - - >>> import seg1d - >>> import numpy as np - >>> import matplotlib.pylab as plt - >>> import scipy.signal as signal - - >>> # create an array of data - >>> x = np.linspace(-1, 1, 2000) - >>> # get an array of data from a Gaussian pulse - >>> targ = signal.gausspulse(x, fc=5) - - >>> # define a segment within the sine wave to use as reference - >>> t_s,t_e = 950,1050 - >>> # cut a segment out to use as a reference data - >>> refData = [ { 'gauss' : targ[t_s:t_e] } ] - >>> targData = { 'gauss' : targ } - >>> refWeights = { 'gauss' : 1 } - - >>> ### define some test parameters - >>> minWin = 98 #minimum percent to scale down reference data - >>> maxWin = 105 #maximum percent to scale up reference data - >>> sizeStep = 1 #step to use for correlating reference to target data - - >>> # call the segmentation algorithm - >>> segments = seg1d.segment_data(refData,targData,refWeights,minWin,maxWin,sizeStep) - >>> print(np.around(segments,decimals=7)) - [[9.500000e+02 1.050000e+03 1.000000e+00] - [7.550000e+02 8.540000e+02 9.867665e-01] - [1.146000e+03 1.245000e+03 9.867665e-01] - [1.343000e+03 1.441000e+03 9.498135e-01] - [5.590000e+02 6.570000e+02 9.498135e-01] - [1.540000e+03 1.638000e+03 8.949109e-01] - [3.620000e+02 4.600000e+02 8.949109e-01] - [1.738000e+03 1.836000e+03 8.301899e-01] - [1.640000e+02 2.620000e+02 8.301899e-01]] - - >>> plt.figure(figsize=(15,4)) # doctest: +SKIP - >>> # plot the full pulse - >>> plt.plot(x, targ,linewidth=6,alpha=0.2,label='Target') # doctest: +SKIP - >>> # plot the original reference segment - >>> plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=8,alpha=0.5,label='Reference') # doctest: +SKIP - >>> # plot all segments found - >>> for s,e,c in segments: - ... plt.plot(x[s:e], targ[s:e],dashes=[0.5,0.5],linewidth=4,alpha=0.8,label='Segments') # doctest: +SKIP - >>> plt.legend() # doctest: +SKIP - >>> plt.show() # doctest: +SKIP - -''' - - - -if __name__ == "__main__": - - - import seg1d - import numpy as np - import matplotlib.pylab as plt - import scipy.signal as signal - - # create an array of data - x = np.linspace(-1, 1, 2000) - # get an array of data from a Gaussian pulse - targ = signal.gausspulse(x, fc=5) - - # define a segment within the pulse to use as reference - t_s, t_e = 950, 1050 - # cut a segment out to use as a reference data - refData = [{'gauss' : targ[t_s:t_e]}] - targData = {'gauss' : targ} - refWeights = {'gauss' : 1} - - ### define some test parameters - minWin = 98 # minimum percent to scale down reference data - maxWin = 105 # maximum percent to scale up reference data - sizeStep = 1 # step to use for correlating reference to target data - - # call the segmentation algorithm - segments = seg1d.segment_data(refData,targData,refWeights,minWin,maxWin,sizeStep) - print(np.around(segments, decimals=7)) - - plt.figure(figsize=(15, 4)) - # plot the full pulse - plt.plot(x, targ, linewidth=6, alpha=0.2, label='Target') - # plot the original reference segment - plt.plot(x[t_s:t_e], targ[t_s:t_e], linewidth=8, alpha=0.5, label='Reference') - # plot all segments found - for s, e, c in segments: - plt.plot(x[s:e], targ[s:e],dashes=[0.5,0.5],linewidth=4,alpha=0.8,label='Segments') - plt.legend() - plt.show() - diff --git a/build/lib/seg1d/examples/ex_multi_signal.py b/build/lib/seg1d/examples/ex_multi_signal.py deleted file mode 100644 index 5dbc7e3..0000000 --- a/build/lib/seg1d/examples/ex_multi_signal.py +++ /dev/null @@ -1,38 +0,0 @@ -import seg1d -import numpy as np -import matplotlib.pylab as plt -import scipy.signal as signal - -#create an 
array of data -x1 = np.linspace(-1, 1, 2000) -x = np.linspace(-1, 1, 4000) -#get an array of data from a gauss pulse and sawtooth function -targ = signal.gausspulse(x1, fc=7) -targ = np.append(targ,signal.sawtooth(2 * np.pi * 5 * x1)) - -#define a segment within the sine wave to use as reference -t_s,t_e = 950,1050 -#cut a segment out to use as a reference data -refData = [ { 'npsin' : targ[t_s:t_e] } ] -targData = { 'npsin' : targ } -refWeights = { 'npsin' : 1 } - -### define some test parameters -minWin = 98 #minimum percent to scale down reference data -maxWin = 105 #maximum percent to scale up reference data -sizeStep = 1 #step to use for correlating reference to target data - -#call the segmentation algorithm -segments = seg1d.segment_data(refData,targData,refWeights,minWin,maxWin,sizeStep) -print(segments) - -#plot the full sine wave -plt.plot(x, targ,linewidth=8,alpha=0.5,label='Target') -#plot the original reference segment -plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=6,alpha=0.7,label='Reference') -#plot all segments found -for s,e,c in segments: - plt.plot(x[s:e], targ[s:e],dashes=[0.5,0.5],linewidth=4,alpha=0.8,label='Segment') -plt.legend() -plt.show() - diff --git a/build/lib/seg1d/examples/ex_segment_class.py b/build/lib/seg1d/examples/ex_segment_class.py deleted file mode 100644 index 3557096..0000000 --- a/build/lib/seg1d/examples/ex_segment_class.py +++ /dev/null @@ -1,28 +0,0 @@ -''' - >>> import seg1d - >>> #retrieve the sample reference, target, and weight data - >>> r,t,w = seg1d.sampleData() - >>> # define some test parameters - >>> minW = 70 #minimum percent to scale down reference data - >>> maxW = 150 #maximum percent to scale up reference data - >>> step = 1 #step to use for correlating reference to target data - >>> #call the segmentation algorithm - >>> seg1d.segment_data(r,t,w,minW,maxW,step) - [[207, 240, 0.9124223704844657], [342, 381, 0.880190111545897], [72, 112, 0.8776795468035664]] - -''' - -import seg1d - -#retrieve the sample reference, target, and weight data -r,t,w = seg1d.sampleData() - -### define some test parameters -minW = 70 #minimum percent to scale down reference data -maxW = 150 #maximum percent to scale up reference data -step = 1 #step to use for correlating reference to target data - -#call the segmentation algorithm -segments = seg1d.segment_data(r,t,w,minW,maxW,step) - -print(segments) \ No newline at end of file diff --git a/build/lib/seg1d/examples/ex_segmenter_features.py b/build/lib/seg1d/examples/ex_segmenter_features.py deleted file mode 100644 index ac2e308..0000000 --- a/build/lib/seg1d/examples/ex_segmenter_features.py +++ /dev/null @@ -1,146 +0,0 @@ -''' -.. plot:: - :context: close-figs - - >>> import numpy as np - >>> import matplotlib.pylab as plt - >>> import seg1d - - >>> #retrieve the sample reference, target, and weight data - >>> r,t,w = seg1d.sampleData(c=0.5) - - Note: The reference data shown here is centered at 0 on the y axis (vertical). - As the algorithm process is based on the shape of the curve, it is irrelevant - what this offset is. - - >>> # plot reference data - >>> plt_r = np.asarray( [ x for y in r for x in y.values() ] ).T - >>> plt.figure(figsize=(3,3)) # doctest: +SKIP - >>> plt.plot(plt_r,alpha=0.3) # doctest: +SKIP - >>> plt.show() # doctest: +SKIP - -.. 
plot:: - :context: close-figs - - >>> # plot target data - >>> plt_t = np.asarray( [ x for x in t.values() ] ) - >>> plt.figure(figsize=(15,4)) # doctest: +SKIP - >>> plt.plot(plt_t.T,alpha=0.5) # doctest: +SKIP - >>> plt.show() # doctest: +SKIP - -.. plot:: - :context: close-figs - - >>> #Make an instance of the segmenter - >>> s = seg1d.Segmenter() - >>> #set scaling parameters - >>> s.minW,s.maxW,s.step = 98, 105, 1 - >>> #Set target and reference data - >>> s.t, s.r, s.w = t,r,w - >>> #call the segmentation algorithm - >>> segments = s.segment() - >>> print(np.around(segments,decimals=7)) - [[204. 245. 0.7128945] - [ 70. 112. 0.6670482] - [340. 382. 0.6630886]] - - >>> plt_t = s.t_masked #get a NaN masked array of the target data - - >>> # plot masked target - >>> plt.figure(figsize=(15,4)) # doctest: +SKIP - >>> plt.plot(plt_t.T,alpha=0.5) # doctest: +SKIP - >>> plt.show() # doctest: +SKIP - -.. plot:: - :context: close-figs - - To use a subset of the features, the weights can be redefined, - which may result in a different segmentation result - - >>> sub = [('C7','z'),('T10','z'),('CLAV','z')] - >>> s.w = { x: w[x] for x in sub } - >>> segments = s.segment() - - >>> print(np.around(segments,decimals=7)) - [[ 2. 44. 0.9648465] - [341. 383. 0.9646419] - [203. 244. 0.9644605] - [273. 314. 0.9640178] - [ 72. 113. 0.9632458] - [139. 180. 0.9624551]] - - >>> plt_t = s.t_masked #get a NaN masked array of the target data - - >>> # plot masked target - >>> plt.figure(figsize=(15,4)) # doctest: +SKIP - >>> plt.plot(plt_t.T,alpha=0.5) # doctest: +SKIP - >>> plt.show() # doctest: +SKIP - -.. plot:: - :context: close-figs - -''' - -if __name__ == '__main__': - - import numpy as np - import matplotlib.pylab as plt - import seg1d - - #retrieve the sample reference, target, and weight data - r,t,w = seg1d.sampleData(c=0.5) - - # Note: The reference data shown here is centered at 0 on the y axis (vertical). - # As the algorithm process is based on the shape of the curve, it is irrelevant - # what this offset is. 
- - # plot reference data - plt_r = np.asarray( [ x for y in r for x in y.values() ] ).T - plt.figure(figsize=(3,3)) - plt.plot(plt_r,alpha=0.3) - plt.show() - - # plot target data - plt_t = np.asarray( [ x for x in t.values() ] ) - plt.figure(figsize=(15,4)) - plt.plot(plt_t.T,alpha=0.5) - plt.show() - - - #Make an instance of the segmenter - s = seg1d.Segmenter() - #set scaling parameters - s.minW,s.maxW,s.step = 98, 105, 1 - #Set target and reference data - s.t, s.r, s.w = t,r,w - #call the segmentation algorithm - segments = s.segment() - print(np.around(segments,decimals=7)) - #[204, 245, 0.7128945157976089], [70, 112, 0.6670481989688246], [340, 382, 0.6630885808206117]] - - plt_t = s.t_masked #get a NaN masked array of the target data - - # plot masked target - plt.figure(figsize=(15,4)) - plt.plot(plt_t.T,alpha=0.5) - plt.show() - - - # to use a subset of the features, the weights can be redefined, - # which may result in a different segmentation result - - sub = [('C7','z'),('T10','z'),('CLAV','z')] - s.w = { x: w[x] for x in sub } - segments = s.segment() - - print(np.around(segments,decimals=7)) - #[[2, 44, 0.9648465496220732], [341, 383, 0.9646419288668043], [203, 244, 0.9644605487326946], [273, 314, 0.9640177603696332], [72, 113, 0.9632458484884389], - #[139, 180, 0.9624551245103149]] - - plt_t = s.t_masked #get a NaN masked array of the target data - - # plot masked target - plt.figure(figsize=(15,4)) - plt.plot(plt_t.T,alpha=0.5) - plt.show() - diff --git a/build/lib/seg1d/examples/ex_segmenter_sine.py b/build/lib/seg1d/examples/ex_segmenter_sine.py deleted file mode 100644 index a7ccb77..0000000 --- a/build/lib/seg1d/examples/ex_segmenter_sine.py +++ /dev/null @@ -1,79 +0,0 @@ -''' -An example of instancing the Segmenter class to use the convenience methods on array data - -.. plot:: - - >>> import seg1d - >>> import numpy as np - >>> import matplotlib.pylab as plt - - Then we generate some data - - >>> x = np.linspace(-np.pi*2, np.pi*2, 2000) #create an array of data - >>> targ = np.sin(x) # target data from a sin function - >>> t_s,t_e = 200,400 # define a sub-series - - To assign the data to the Segmenter, first we create an instance of it and then - use the ``set_target()`` and ``add_reference()`` methods. - - >>> s = seg1d.Segmenter() # instance of the segmenter - >>> s.minW, s.maxW, s.step = 98, 105, 1 # scaling parameters - >>> s.set_target(targ) # set target and reference data - >>> s.add_reference(targ[t_s:t_e]) - >>> segments = s.segment() # run segmentation algorithm - >>> np.around(segments, decimals=7) - array([[2.000000e+02, 4.000000e+02, 1.000000e+00], - [1.200000e+03, 1.398000e+03, 9.999999e-01]]) - - Using matplotlib we can visualize the results - - >>> plt.figure(figsize=(10,3)) #doctest: +SKIP - >>> #plot the full sine wave - >>> plt.plot(x, targ,linewidth=8,alpha=0.2,label='Target') #doctest: +SKIP - >>> #plot the original reference segment - >>> plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=6,alpha=0.7,label='Reference') #doctest: +SKIP - >>> - >>> #plot all segments found - >>> for s,e,c in segments: - ... 
plt.plot(x[s:e], targ[s:e],dashes=[1,1],linewidth=4,alpha=0.8,label='Segment') #doctest: +SKIP - >>> plt.legend() #doctest: +SKIP - >>> plt.show() #doctest: +SKIP - -''' - -if __name__ == '__main__': - - import seg1d - import numpy as np - import matplotlib.pylab as plt - - # create an array of data - x = np.linspace(-np.pi*2, np.pi*2, 2000) - # get an array of data from a sin function - targ = np.sin(x) - # define a segment within the sine wave to use as reference - t_s, t_e = 200, 400 - - # Make an instance of the segmenter - s = seg1d.Segmenter() - - # set scaling parameters - s.minW, s.maxW, s.step = 98, 105, 1 - # Set target and reference data - s.set_target(targ) - s.add_reference(targ[t_s:t_e]) - # call the segmentation algorithm - segments = s.segment() - - print(segments) - - plt.figure(figsize=(10, 3)) - # plot the full sine wave - plt.plot(x, targ, linewidth=8, alpha=0.2, label='Target') - # plot the original reference segment - plt.plot(x[t_s:t_e], targ[t_s:t_e], linewidth=6, alpha=0.7, label='Reference') - # plot all segments found - for s, e, c in segments: - plt.plot(x[s:e], targ[s:e],dashes=[1,1],linewidth=4,alpha=0.8,label='Segment') - plt.legend() - plt.show() diff --git a/build/lib/seg1d/examples/ex_simple.py b/build/lib/seg1d/examples/ex_simple.py deleted file mode 100644 index 0fc26e1..0000000 --- a/build/lib/seg1d/examples/ex_simple.py +++ /dev/null @@ -1,35 +0,0 @@ -''' -Example using included sample data - ->>> import seg1d ->>> import numpy as np ->>> #retrieve the sample reference, target, and weight data ->>> r,t,w = seg1d.sampleData() ->>> ### define some test parameters ->>> minW = 70 #minimum percent to scale down reference data ->>> maxW = 150 #maximum percent to scale up reference data ->>> step = 1 #step to use for correlating reference to target data ->>> #call the segmentation algorithm ->>> np.around( seg1d.segment_data(r,t,w,minW,maxW,step) , decimals=7 ) -array([[207. , 240. , 0.9124224], - [342. , 381. , 0.8801901], - [ 72. , 112. , 0.8776795]]) - -''' - -if __name__ == "__main__": - - import seg1d - - #retrieve the sample reference, target, and weight data - r,t,w = seg1d.sampleData() - - ### define some test parameters - minW = 70 #minimum percent to scale down reference data - maxW = 150 #maximum percent to scale up reference data - step = 1 #step to use for correlating reference to target data - - #call the segmentation algorithm - segments = seg1d.segment_data(r,t,w,minW,maxW,step) - - print(segments) \ No newline at end of file diff --git a/build/lib/seg1d/examples/ex_sine.py b/build/lib/seg1d/examples/ex_sine.py deleted file mode 100644 index df99cdd..0000000 --- a/build/lib/seg1d/examples/ex_sine.py +++ /dev/null @@ -1,91 +0,0 @@ -''' -Sample using sine wave - -.. 
plot:: - - >>> import seg1d - >>> import numpy as np - >>> import matplotlib.pylab as plt - - Data can be constructed as a numpy array - - >>> # create an array of data - >>> x = np.linspace(-np.pi*2, np.pi*2, 2000) - >>> # get an array of data from a sin function - >>> targ = np.sin(x) - - To use the basic method interface, the data must be labeled - - >>> # define a segment within the sine wave to use as reference - >>> t_s,t_e = 200,400 - >>> # cut a segment out to use as a reference data - >>> refData = [ { '0' : targ[t_s:t_e] } ] - >>> targData = {'0' : targ} - >>> refWeights = {'0' : 1} - >>> - >>> ### define some test parameters - >>> minWin = 98 #minimum percent to scale down reference data - >>> maxWin = 105 #maximum percent to scale up reference data - >>> sizeStep = 1 #step to use for correlating reference to target data - >>> - >>> #call the segmentation algorithm - >>> segments = seg1d.segment_data(refData,targData,refWeights,minWin,maxWin,sizeStep) - >>> np.around(segments, decimals=7) - array([[2.000000e+02, 4.000000e+02, 1.000000e+00], - [1.200000e+03, 1.398000e+03, 9.999999e-01]]) - - Using matplotlib we can visualize the results - - >>> plt.figure(figsize=(10,3)) #doctest: +SKIP - >>> # plot the full sine wave - >>> plt.plot(x, targ,linewidth=6,alpha=0.2,label='Target') #doctest: +SKIP - >>> # plot the original reference segment - >>> plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=8,alpha=0.7,label='Reference') #doctest: +SKIP - >>> - >>> # plot all segments found - >>> for s,e,c in segments: - ... plt.plot(x[s:e], targ[s:e],dashes=[1,1],linewidth=4,alpha=0.8,label='Segment') #doctest: +SKIP - >>> plt.legend() #doctest: +SKIP - >>> plt.show() #doctest: +SKIP - -''' - - -if __name__ == "__main__": - - import seg1d - import numpy as np - import matplotlib.pylab as plt - - # create an array of data - x = np.linspace(-np.pi*2, np.pi*2, 2000) - # get an array of data from a sin function - targ = np.sin(x) - - # define a segment within the sine wave to use as reference - t_s,t_e = 200,400 - # cut a segment out to use as a reference data - refData = [ {'0' : targ[t_s:t_e]} ] - targData = {'0' : targ} - refWeights = {'0' : 1} - - ### define some test parameters - minWin = 98 # minimum percent to scale down reference data - maxWin = 105 # maximum percent to scale up reference data - sizeStep = 1 # step to use for correlating reference to target data - - # call the segmentation algorithm - segments = seg1d.segment_data(refData,targData,refWeights,minWin,maxWin,sizeStep) - print(segments) - - plt.figure(figsize=(10,3)) - # plot the full sine wave - plt.plot(x, targ,linewidth=8,alpha=0.2,label='Target') - # plot the original reference segment - plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=6,alpha=0.7,label='Reference') - # plot all segments found - for s,e,c in segments: - plt.plot(x[s:e], targ[s:e],dashes=[1,1],linewidth=4,alpha=0.8,label='Segment') - plt.legend() - plt.show() - diff --git a/build/lib/seg1d/examples/ex_sine_noise.py b/build/lib/seg1d/examples/ex_sine_noise.py deleted file mode 100644 index 4bf84c7..0000000 --- a/build/lib/seg1d/examples/ex_sine_noise.py +++ /dev/null @@ -1,304 +0,0 @@ -''' - -In this example, the attributes of the segmentation algorithm will be -demonstrated through a sine wave with added noise. In this example, the -seed used for the random noise is the same in both the target and reference, -although a different SNR is used. - -.. 
plot:: - :context: close-figs - - First we import ``seg1d``, a helper function for adding noise in the example called - ``segnoise``, and the plotting utils from ``matplotlib``. - - >>> import seg1d - >>> import numpy as np - >>> import matplotlib.pylab as plt - >>> import seg1d.examples.noise as segnoise - - Next an array of data is generated and a sine wave is created. - A signal-noise ratio of 30 is added to the sine wave. - - >>> # create an array of data - >>> x = np.linspace(-np.pi*2, np.pi*2, 2000) - >>> # get an array of data from a sin function - >>> targ = np.sin(x) - >>> # add noise to the signal - >>> np.random.seed(123) - >>> targ = segnoise.add_noise(targ,snr=30) - - The target data that is used for finding segments in looks like: - - >>> # Plot the target - >>> plt.figure(figsize=(10,3)) #doctest: +SKIP - >>> plt.plot(x, targ,linewidth=4,alpha=0.5,label='Target')#doctest: +SKIP - >>> plt.legend()#doctest: +SKIP - >>> plt.show()#doctest: +SKIP - -.. plot:: - :context: close-figs - - Now another noisy sine wave is created and a segment of it is cut out. - - >>> # define a segment within the sine wave to use as reference - >>> t_s,t_e = 200,400 - >>> # number of reference datasets to generate for the example - - >>> # make reference data with different random noise on a segment of the original - >>> np.random.seed(123) - >>> refData = segnoise.add_noise(np.sin(x),snr=45)[t_s:t_e] - - The reference data looks like: - - >>> plt.figure(figsize=(3,3)) #doctest: +SKIP - >>> # Plot the reference - >>> plt.plot(x[t_s:t_e], refData,linewidth=4,alpha=0.5,label='Reference')#doctest: +SKIP - >>> plt.legend()#doctest: +SKIP - >>> plt.show()#doctest: +SKIP - -.. plot:: - :context: close-figs - - To find the sub-series segment, an instance of the ``Segmenter`` class is created, - basic scaling parameters, and the target and reference data are assigned. - - >>> # Make an instance of the segmenter - >>> s = seg1d.Segmenter() - >>> #set scaling parameters - >>> s.minW,s.maxW,s.step = 90, 110, 1 - >>> #Set target and reference data - >>> s.set_target(targ) - >>> s.add_reference(refData) - >>> #call the segmentation algorithm - >>> segments = s.segment() - >>> np.around(segments, decimals=7) - array([[1.200000e+03, 1.420000e+03, 9.916268e-01], - [2.000000e+02, 4.000000e+02, 9.904041e-01], - [4.000000e+02, 5.820000e+02, 8.933443e-01], - [1.421000e+03, 1.601000e+03, 8.833249e-01]]) - - After running the segmentation algorithm, we plot the segment the reference - data should be located, along with the segments that were found. - - - >>> plt.figure(figsize=(10,3))#doctest: +SKIP - >>> #plot the full sine wave - >>> plt.plot(x, targ,linewidth=4,alpha=0.2,label='Target')#doctest: +SKIP - >>> #plot the location of the original reference segment - >>> # NOTE this is just the location, the actual reference data is shown above - >>> plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=6,alpha=0.7,label='Reference')#doctest: +SKIP - >>> #plot all segments found - >>> for seg in segments: - ... st = seg[0] - ... e = seg[1] - ... plt.plot(x[st:e], targ[st:e],dashes=[1,1],linewidth=2,alpha=0.8,label='Segment')#doctest: +SKIP - >>> plt.legend()#doctest: +SKIP - >>> plt.show()#doctest: +SKIP - -.. plot:: - :context: close-figs - - From the plot, it is clear there are segments that do not belong. - By accessing the ``Segmenter`` attributes, the algorithm and this error are better understood (and resolved). 
- - >>> # First we look at the original segments before clustering - >>> np.around(s.groups, decimals=7) - array([[1.200000e+03, 1.420000e+03, 9.916268e-01], - [2.000000e+02, 4.000000e+02, 9.904041e-01], - [4.000000e+02, 5.820000e+02, 8.933443e-01], - [1.421000e+03, 1.601000e+03, 8.833249e-01], - [5.830000e+02, 7.650000e+02, 7.286635e-01], - [1.602000e+03, 1.782000e+03, 6.541974e-01]]) - - As shown in the output, there are a total of 6 segments found before clustering. - - As the distribution of segments is apporx. [0.99,0.99,0.89,0.88,0.72,0.65], - the attribute, ``Segmenter.cAdd``, (defaults to 0.5) that is added for forcing clusters - only combines the last two values, 0.72 and 0.65 in the lower cluser. - - Modifying this attribute would then change the clusters, for example: - - >>> s.cAdd = 0.8 - >>> np.around(s.segment(), decimals=7) - array([[1.200000e+03, 1.420000e+03, 9.916268e-01], - [2.000000e+02, 4.000000e+02, 9.904041e-01]]) - - - If the attribute is removed, then only the original segments are used in the clustering. - However, this results in the same cluster as the original where the default of ``cAdd`` was 0.5. - - >>> s.cAdd = None - >>> np.around(s.segment(), decimals=7) - array([[1.200000e+03, 1.420000e+03, 9.916268e-01], - [2.000000e+02, 4.000000e+02, 9.904041e-01], - [4.000000e+02, 5.820000e+02, 8.933443e-01], - [1.421000e+03, 1.601000e+03, 8.833249e-01]]) - - Alternatively, the minimum correlation for a given segment can be set with the ``Segmenter.cMin`` attribute. - - >>> s.cMin = 0.9 - >>> np.around(s.segment(),decimals=7) - array([[1.200000e+03, 1.420000e+03, 9.916268e-01]]) - - - Since the ``cAdd`` was removed, the only segments available (higher than 0.9 correlation) - were both 0.99, making the clustering result in a single segment. - - If ``cAdd`` is set back to the default, the segment is correct. - - >>> s.cAdd = 0.5 - >>> segments = s.segment() - >>> np.around(segments, decimals=7) - array([[1.200000e+03, 1.420000e+03, 9.916268e-01], - [2.000000e+02, 4.000000e+02, 9.904041e-01]]) - - Finally, plotting these segments shows the alignment and logical sub-series - identification. - - >>> plt.figure(figsize=(10,3))#doctest: +SKIP - >>> #plot the full sine wave - >>> plt.plot(x, targ,linewidth=4,alpha=0.2,label='Target')#doctest: +SKIP - >>> #plot the original reference segment - >>> plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=6,alpha=0.7,label='Reference')#doctest: +SKIP - >>> #plot all segments found - >>> for seg in segments: - ... s = seg[0] - ... e = seg[1] - ... plt.plot(x[s:e], targ[s:e],dashes=[1,1],linewidth=2,alpha=0.8,label='Segment')#doctest: +SKIP - >>> plt.legend()#doctest: +SKIP - >>> plt.show()#doctest: +SKIP - -.. plot:: - :context: close-figs - -''' - -if __name__ == "__main__": - - import seg1d - import numpy as np - import matplotlib.pylab as plt - import seg1d.examples.noise as segnoise - - #create an array of data - x = np.linspace(-np.pi*2, np.pi*2, 2000) - #get an array of data from a sin function - targ = np.sin(x) - #add noise to the signal - np.random.seed(123) - targ = segnoise.add_noise(targ,snr=30) - - #Plot the target - plt.figure(figsize=(10,3)) #doctest: +SKIP - plt.plot(x, targ,linewidth=4,alpha=0.5,label='Target')#doctest: +SKIP - plt.legend()#doctest: +SKIP - plt.show()#doctest: +SKIP - - - """ - This plot can be displayed inline with a call the ``current_figure`` tag: - - .. 
image:: PLOT2RST.current_figure - - - And here's a second plot in a *new figure*: - """ - - #define a segment within the sine wave to use as reference - t_s,t_e = 200,400 - #number of reference datasets to generate for the example - - #make reference data with different random noise on a segment of the original - np.random.seed(123) - refData = segnoise.add_noise(np.sin(x),snr=45)[t_s:t_e] - - # Plot the reference - plt.plot(x[t_s:t_e], refData,linewidth=4,alpha=0.5,label='Reference')#doctest: +SKIP - plt.legend() # doctest: +SKIP - plt.show() # doctest: +SKIP - - # Make an instance of the segmenter - s = seg1d.Segmenter() - # set scaling parameters - s.minW,s.maxW,s.step = 90, 110, 1 - # Set target and reference data - s.set_target(targ) - s.add_reference(refData) - # call the segmentation algorithm - segments = s.segment() - print(segments) - - - plt.figure(figsize=(10,3)) # doctest: +SKIP - #plot the full sine wave - plt.plot(x, targ,linewidth=4,alpha=0.2,label='Target') # doctest: +SKIP - #plot the location of the original reference segment - # NOTE this is just the location, the actual reference data is shown above - plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=2,alpha=0.7,label='Reference') # doctest: +SKIP - #plot all segments found - for seg in segments: - st = seg[0] - e = seg[1] - plt.plot(x[st:e], targ[st:e],dashes=[1,1],linewidth=2,alpha=0.8,label='Segment') # doctest: +SKIP - plt.legend() # doctest: +SKIP - plt.show() # doctest: +SKIP - - # From the plot, it is clear there is a segment that doesn't belong. - # By accessing the Segmenter attributes, the algorithm and this error are better understood (and resolved). - - # First we look at the original segments before clustering - print(s.groups) - #[[1200, 1420, 0.9916267987946981], [200, 400, 0.990404095393343], [400, 582, 0.8933442691404018], [1421, 1601, 0.883324901611455], [583, 765, 0.7286635266077575], [1602, 1782, 0.6541974242077591]] - - - # As shown in the output, there are a total of 6 segments found before clustering. - # - # As the distribution of segments is apporx. [0.99,0.99,0.89,0.88,0.72,0.65], - # the attribute, ``Segmenter.cAdd``, (defaults to 0.5) that is added for forcing clusters - # only combines the last two values, 0.72 and 0.65 in the lower cluser. - # - # Modifying this attribute would then change the clusters, for example: - - s.cAdd = 0.8 - print( s.segment() ) - #[[1200, 1420, 0.9916267987946981], [200, 400, 0.990404095393343]] - - - # If the attribute is removed, then only the original segments are used in the clustering. - # However, this results in the same cluster as the original where the default of ``cAdd`` was 0.5. - # - s.cAdd = None - print( s.segment() ) - #[[1200, 1420, 0.9916267987946981], [200, 400, 0.990404095393343], [400, 582, 0.8933442691404018], [1421, 1601, 0.883324901611455]] - - # - # Alternatively, the minimum correlation for a given segment can be set with the ``Segmenter.cMin`` attribute. - # - s.cMin = 0.9 - print( s.segment() ) - #[[1200, 1420, 0.9916267987946981]] - - - # Since the ``cAdd`` was removed, the only segments available (higher than 0.9 correlation) - # were both 0.99, making the clustering result in a single segment. - # - # If ``cAdd`` is set back to the default, the segment is correct. 
- # - s.cAdd = 0.5 - segments = s.segment() - print(segments) - #[[1200, 1420, 0.9916267987946981], [200, 400, 0.990404095393343]] - - - plt.figure(figsize=(10,3))#doctest: +SKIP - #plot the full sine wave - plt.plot(x, targ,linewidth=4,alpha=0.2,label='Target')#doctest: +SKIP - #plot the original reference segment - plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=2,alpha=0.7,label='Reference')#doctest: +SKIP - #plot all segments found - for seg in segments: - s = seg[0] - e = seg[1] - plt.plot(x[s:e], targ[s:e],dashes=[1,1],linewidth=2,alpha=0.8,label='Segment')#doctest: +SKIP - plt.legend()#doctest: +SKIP - plt.show()#doctest: +SKIP diff --git a/build/lib/seg1d/examples/ex_sine_noise_few.py b/build/lib/seg1d/examples/ex_sine_noise_few.py deleted file mode 100644 index b30a61d..0000000 --- a/build/lib/seg1d/examples/ex_sine_noise_few.py +++ /dev/null @@ -1,100 +0,0 @@ -import seg1d -import numpy as np -import matplotlib.pylab as plt -import seg1d.examples.noise as segnoise - -#create an array of data -x = np.linspace(-np.pi*2, np.pi*2, 2000) -#get an array of data from a sin function -targ = np.sin(x) -#add noise to the signal -targ = segnoise.add_noise(targ,snr=40) - -#Plot the target -plt.figure(figsize=(10,3)) #doctest: +SKIP -plt.plot(x, targ,linewidth=4,alpha=0.5,label='Target')#doctest: +SKIP -plt.legend()#doctest: +SKIP -plt.show()#doctest: +SKIP - -#define a segment within the sine wave to use as reference -t_s,t_e = 200,400 -#number of reference datasets to generate for the example - -#make reference data with different random noise on a segment of the original -refData = segnoise.add_noise(np.sin(x),snr=45)[t_s:t_e] - -#Plot the reference -plt.plot(x[t_s:t_e], refData,linewidth=4,alpha=0.5,label='Reference')#doctest: +SKIP -plt.legend()#doctest: +SKIP -plt.show()#doctest: +SKIP - -#Make an instance of the segmenter -s = seg1d.Segmenter() -#set scaling parameters -s.minW,s.maxW,s.step = 90, 110, 1 -#Set target and reference data -s.set_target(targ) -s.add_reference(refData) -#call the segmentation algorithm -segments = s.segment() -print(segments) - - -plt.figure(figsize=(10,3))#doctest: +SKIP -#plot the full sine wave -plt.plot(x, targ,linewidth=4,alpha=0.2,label='Target')#doctest: +SKIP -#plot the location of the original reference segment -# NOTE this is just the location, the actual reference data is shown above -plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=2,alpha=0.7,label='Reference')#doctest: +SKIP -#plot all segments found -for seg in segments: - s = seg[0] - e = seg[1] - plt.plot(x[s:e], targ[s:e],dashes=[1,1],linewidth=2,alpha=0.8,label='Segment')#doctest: +SKIP -plt.legend()#doctest: +SKIP -plt.show()#doctest: +SKIP - -# From the plot, it is clear there is a segment that doesn't belong. -# By accessing the Segmenter attributes, the algorithm and this error are better understood (and resolved). - -# First we look at the original segments before clustering -print(s.groups) - -# It turns out these are the same number of segments as the final. -# This happens as the clustering algorithm adds a correlation to force 2 clusters. -# This Attribute, ``Segmenter.cAdd``, defaults to 0.5. -# In this example, that sets the correlation values to (approx.) 0.99,0.99,0.86,0.5 -# Modifying this attribute would then change the clusters, for example: - -s.cAdd = 0.8 -print( s.segment() ) - -# Likewise, it is the presence of this added variable that causes the problem and removing it resolves the issue. 
-s.cAdd = None -print( s.segment() ) - -# If the target data is expected to be highly similar to the reference data, the best solution is to set ``cAdd`` to None. -# -# Alternatively, the minimum correlation for a given segment can be set with the ``Segmenter.cMin`` attribute. -s.cMin = 0.9 -print( s.segment() ) - -# Since the ``cAdd`` was removed, the only segments available were both 0.99, making the clustering result in a single segment. -# If ``cAdd`` is set back to the default, the segment is correct. -s.cAdd = 0.5 -segments = s.segment() -print(segments) - - -plt.figure(figsize=(10,3))#doctest: +SKIP -#plot the full sine wave -plt.plot(x, targ,linewidth=4,alpha=0.2,label='Target')#doctest: +SKIP -#plot the original reference segment -plt.plot(x[t_s:t_e], targ[t_s:t_e],linewidth=2,alpha=0.7,label='Reference')#doctest: +SKIP -#plot all segments found -for seg in segments: - s = seg[0] - e = seg[1] - plt.plot(x[s:e], targ[s:e],dashes=[1,1],linewidth=2,alpha=0.8,label='Segment')#doctest: +SKIP -plt.legend()#doctest: +SKIP -plt.show()#doctest: +SKIP diff --git a/build/lib/seg1d/examples/ex_sine_scale.py b/build/lib/seg1d/examples/ex_sine_scale.py deleted file mode 100644 index 64bd909..0000000 --- a/build/lib/seg1d/examples/ex_sine_scale.py +++ /dev/null @@ -1,44 +0,0 @@ -import seg1d -import numpy as np -import matplotlib.pylab as plt - -#create an array of data -x = np.linspace(-np.pi*2, np.pi*2, 2000) -#get an array of data from a sin function -targ = np.sin(x) - -#make another array of sine wave data scaled larger -x2 = np.linspace(-np.pi*1, np.pi*1, 2000) -#get an array of data from a sin function -refSin = np.sin(x2) - -#define a segment within the sine wave to use as reference -t_s,t_e = 1000,2000 -#cut a segment out to use as a reference data -refData = [ { 'npsin' : refSin[t_s:t_e] } ] -targData = { 'npsin' : targ } -refWeights = { 'npsin' : 1 } - -### define some test parameters -minWin = 50 #minimum percent to scale down reference data -maxWin = 200 #maximum percent to scale up reference data -sizeStep = 1 #step to use for correlating reference to target data - -#call the segmentation algorithm -segments = seg1d.segment_data(refData,targData,refWeights,minWin,maxWin,sizeStep) -print(segments) - -#plot the part of the reference data -plt.plot(x2, refSin,linewidth=8) -plt.plot(x2[1000:2000],refSin[1000:2000]) -plt.show() - -#plot the full sine wave -plt.plot(x, targ,linewidth=8) - -#plot all segments found -for seg in segments: - s = seg[0] - e = seg[1] - plt.plot(x[s:e], targ[s:e],dashes=[1,1],linewidth=6) -plt.show() \ No newline at end of file diff --git a/build/lib/seg1d/examples/noise.py b/build/lib/seg1d/examples/noise.py deleted file mode 100644 index 81a8bfa..0000000 --- a/build/lib/seg1d/examples/noise.py +++ /dev/null @@ -1,18 +0,0 @@ -import numpy as np - -def add_noise(a, snr = 20): - x_watts = a ** 2 - # Set a target SNR - target_snr_db = snr - # Calculate signal power and convert to dB - sig_avg_watts = np.mean(x_watts) - sig_avg_db = 10 * np.log10(sig_avg_watts) - # Calculate noise according to [2] then convert to watts - noise_avg_db = sig_avg_db - target_snr_db - noise_avg_watts = 10 ** (noise_avg_db / 10) - # Generate an sample of white noise - mean_noise = 0 - noise_volts = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(x_watts)) - - b = a + noise_volts - return b \ No newline at end of file diff --git a/build/lib/seg1d/examples/test.py b/build/lib/seg1d/examples/test.py deleted file mode 100644 index 8eb7d66..0000000 --- 
a/build/lib/seg1d/examples/test.py +++ /dev/null @@ -1,49 +0,0 @@ -'''A method for a user to run the doctests without building documentation -Suggested use: - -from seg1d.examples import test -test.run() -''' - -print('Imported Testing Setup') -print('Use: test.run() to start tests') - -from . import * -import seg1d - - -def run(): - import doctest - - print("\nTesting GAUSS Example") - print(doctest.testmod(ex_gauss)) - - print("\nTesting SIMPLE Example") - print(doctest.testmod(ex_simple)) - - print("\nTesting ECG Example") - print(doctest.testmod(ex_ecg)) - - print("\nTesting SINE Example") - print(doctest.testmod(ex_sine)) - - print("\nTesting SINE NOISE Example") - print(doctest.testmod(ex_sine_noise)) - - print("\nTesting SEGMENTER FEATURES Example") - print(doctest.testmod(ex_segmenter_features)) - - print("\nTesting SEGMENTER SINE Example") - print(doctest.testmod(ex_segmenter_sine)) - - print("\nTesting Segmenter Class") - print(doctest.testmod(seg1d.segment)) - - print("\nTesting Algorithm Methods") - print(doctest.testmod(seg1d.algorithm)) - - print("Finished running tests \n") - - -if __name__ == '__main__': - run() diff --git a/build/lib/seg1d/optimized_funcs.py b/build/lib/seg1d/optimized_funcs.py deleted file mode 100644 index 2fff0bf..0000000 --- a/build/lib/seg1d/optimized_funcs.py +++ /dev/null @@ -1,134 +0,0 @@ -''' -.. module:: optimized_funcs - :platform: Unix, Windows - :synopsis: optimized functions. - -''' - -import numpy as np -import numba - - -@numba.jit(nopython=True, fastmath=True) -def rcor(x, Y): - ''' - Correlation of multiple arrays to a single array using a rolling - window correlation. - - Parameters - ---------- - x : 1d array - target array - - Y : ndarray - references resampled to correct size - - Returns - ------- - n x m array - correlations of one ndarray to an m x ndarray - - - Notes - ----- - This will try to use numba for optimization. - - - Examples - -------- - - >>> import numpy as np - >>> import seg1d.optimized_funcs as optF - - >>> x = np.sin( np.linspace(-3, 3, 25) ) - >>> y = np.sin( np.linspace(-3, 3, 60) ).reshape(3,20) - - >>> optF.rcor(x,y) - array([[-0.50743663, -0.66692675, -0.78849873, -0.87803067, -0.93682968, - -0.96013818], - [ 0.83362263, 0.91097751, 0.94663428, 0.94663428, 0.91097751, - 0.83362263], - [-0.96013818, -0.93682968, -0.87803067, -0.78849873, -0.66692675, - -0.50743663]]) - - ''' - - rSize, w = Y.shape # number of references , size of reference - cSize = x.size-w+1 # size of the rolling correlation array - - # empty array for putting rolling correlation - rCorr = np.empty((rSize, cSize)) - - for i in range(0, rSize): - y = Y[i] - # get the correlation between the values - rCorr[i] = vcor(x, y) - - return rCorr - -@numba.jit(nopython=True, fastmath=True) -def vcor(x, y): - ''' Rolling correlation between two arrays. - Optimized by numba if available - - Parameters - ---------- - x : 1D array - array to use as static data - - y : 1D array - array to use as rolling data - - Returns - ------- - 1D array - correlations at each increment - - ``size = (size(x) - size(y)) + 1`` - - Notes - ----- - Required: ``size(x) > size(y)`` - This will try to use numba for optimization. 
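As a cross-check (a sketch, not part of the module): each returned element is the Pearson
correlation coefficient of ``y`` against one window of ``x``, so the result can be
reproduced with ``numpy.corrcoef``:

>>> import numpy as np
>>> import seg1d.optimized_funcs as optF
>>> x = np.sin(np.linspace(-3, 3, 25))
>>> y = np.sin(np.linspace(-3, 3, 20))
>>> manual = [np.corrcoef(x[i:i + y.size], y)[0, 1] for i in range(x.size - y.size + 1)]
>>> bool(np.allclose(manual, optF.vcor(x, y)))
True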
- - Examples - -------- - - >>> import numpy as np - >>> import seg1d.optimized_funcs as optF - - >>> x = np.sin( np.linspace(-3, 3, 25) ) - >>> y = np.sin( np.linspace(-3, 3, 20) ) - - >>> optF.vcor(x,y) - array([0.83212194, 0.90933756, 0.94493014, 0.94493014, 0.90933756, - 0.83212194]) - - ''' - - ySize = y.size - xSize = x.size - corrs = np.empty(((xSize-ySize)+1,)) - n = ySize - for i in range(0, (xSize-ySize)+1): - X = x[i:i+ySize] - Y = y - xSum = 0.0 - ySum = 0.0 - for k in range(0, n): - xSum += X[k] - ySum += Y[k] - xMean = xSum/n - yMean = ySum/n - num = 0.0 - sumx2 = 0.0 - sumy2 = 0.0 - for j in range(0, n): - xm = (X[j] - xMean) - ym = (Y[j] - yMean) - num += xm*ym - sumx2 += xm*xm - sumy2 += ym*ym - denom = np.sqrt(sumx2 * sumy2) - corrs[i] = num/denom - return corrs diff --git a/build/lib/seg1d/segment.py b/build/lib/seg1d/segment.py deleted file mode 100644 index ec244d9..0000000 --- a/build/lib/seg1d/segment.py +++ /dev/null @@ -1,686 +0,0 @@ -''' -.. module:: segment - :platform: Unix, Windows - :synopsis: Segmentation of 1D data from subsequences. - -''' - -from copy import deepcopy - -import numpy as np - -from . import algorithm as alg - - -class Segmenter: - ''' - Segmentation class that exposes all algorithm parameters and attributes for - advanced access and tuning of segmentation. - - Additional convenience methods for adding reference and target data as - numpy arrays are provided. - - Results of each step of the algorithm process can be accessed through the - class Attributes after running the segmentation. These can likewise be - passed to the algorithms methods described in the documentation. - - - Examples - -------- - Simple usage of the class by directly assigning attributes - using sample data included with this package. - - >>> import seg1d - >>> import numpy as np - >>> - >>> #Make an instance of the segmenter - >>> s = seg1d.Segmenter() - >>> - >>> #retrieve the sample reference, target, and weight data - >>> s.r,s.t,s.w = seg1d.sampleData() - >>> - >>> #set the parameters - >>> s.minW,s.maxW,s.step = 70, 150, 1 - >>> - >>> np.around(s.segment(), decimals=7) - array([[207. , 240. , 0.9124224], - [342. , 381. , 0.8801901], - [ 72. , 112. , 0.8776795]]) - ''' - - def __init__(self): - ''' Initialization of segmentation class and parameters - - - Attributes - ---------- - - r : array of dicts - The reference dataset - t : dict - The target dataset - w : dict - Weights for correlation - - minW : int - minimum percent to scale data - maxW : int - maximum percent to scale data - step : int - step size for rolling correlation - wSizes : list - sizes to use for resampling reference - (can be used instead of minW,maxW,step) - cMax : bool - use maximum in rolling correlation (default False) - cMin : float - -1 to 1, min correlation - cAdd : float - 0 to 1 or None, value to add for forcing clusters (Default 0.5) - pD : None - peak distance to use for scipy peak detection (Default None) - nC : int - number of clusters for correlation results - fMode : {'w', 'm', 's'} - keyword to use for aggregating feature correlations (default `w`). 
- Options, w=weighted mean, m=mean, s=sum - fScale : bool - scale the feature correlation by its weight before feature - aggregation (Default True) - - tSeg : [] - the target data as segmented arrays - - ''' - # internal attributes - # tLen : int - # length of target data - # rLen : int - # length of reference data - # tF : set - # features of the target data - # rF : set - # features of the reference data - # wF : set - # features of the weights - - # ''' - - self.r = [] # reference data ## if 'r' not in *args - self.t = {} # target data - self.w = {} # weights - - self.tF = set() # features of the target data - self.rF = set() # features of the reference data - self.wF = set() # features of the weights - - self.tLen = 0 # length of target data - self.rLen = 0 # length of reference data - - self.minW = 50 # minimum percent to scale data - self.maxW = 200 # maximum percent to scale data - self.step = 1 # step size for rolling correlation - self.wSizes = [] # sizes to use for resampling reference - - self.cMax = False # use maximum in rolling correlation - self.cMin = 0.5 # min correlation - self.cAdd = 0.5 # value to add for forcing clusters - self.pD = None # peak distance - self.nC = 2 # num clusters - self.fMode = 'm' # method to aggregate weighted features - self.fScale = True # scale the features correlation by the weight - - self.tSeg = [] # the target data as segmented arrays - - self._maxR = 0 # keeps track of reference size - - - @property - def corrs(self): - '''Rolling correlation of reference and target features created by - :func:`algorithm.rolling_corr` - ''' - - resDict = {} - # iterate through a change in percentage of window sizes - for wSize in self.wSizes: - featDict = {} - # for using generated tuples - for featName in self.w: - # make an array of the reference data - r = np.asarray([x[featName] for x in self.r]) - - # get target data of this feature - t = self.t[featName] - - # get the correlation of a reference set across the length of - # the target data - featDict[featName] = alg.rolling_corr(t, r, wSize, cMax=self.cMax) - - # store features for this window size - resDict[wSize] = featDict - - return resDict - - - @property - def peaks(self): - ''' Peaks of the correlations created by :func:`algorithm.get_peaks` - ''' - return alg.get_peaks(self.combined, self.cMin, self.pD) - - - @property - def combined(self): - ''' The averaged correlation of the rolling feature correlation - and the weighting table created by :func:`algorithm.combine_corr` - ''' - return alg.combine_corr(self.corrs, self.w, self.fMode, self.fScale) - - - @property - def clusters(self): - '''Segments reduced by clustering algorithm from - :func:`algorithm.cluster` - ''' - return alg.cluster(self.groups, segAdder=self.cAdd, nClust=self.nC) - - - @property - def groups(self): - ''' Possible segments through parsing overlapping segment locations - defined by :func:`algorithm.uniques` - ''' - return alg.uniques(self.peaks, self.tLen) - - - @property - def t_masked(self): - ''' The target data as ndarray masked with the non-defined - segments as NaNs. - - Useful for plotting, but should not be used for data processing as - dicts are not ordered. 
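For example, a minimal plotting sketch (illustration only; it assumes matplotlib and the
bundled sample data):

import matplotlib.pylab as plt
import seg1d

s = seg1d.Segmenter()
s.r, s.t, s.w = seg1d.sampleData()
s.minW, s.maxW, s.step = 70, 150, 1
s.segment()                      # run the pipeline so the clusters (and the mask) exist
for row in s.t_masked:           # one masked row per feature; values outside segments are NaN
    plt.plot(row, linewidth=2)
plt.show()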
- ''' - - # slice and mask the data - _t = np.asarray([x for x in self.t.values()]) - mask_seg = np.concatenate([np.arange(x[0], x[1], 1) - for x in self.clusters]) - mask_arr = np.full(_t.shape, True, dtype=bool) - mask_arr[ :, mask_seg] = False - _t[mask_arr] = np.NaN - - return _t - - - @property - def t_segments(self): - ''' Returns an array of segmented target data - - Parameters - ---------- - None - - Returns - ------- - Segments : List[Dict[str,numpy.array]] - applies the segment endpoints to the given target data *t* on all - features. - - Examples - -------- - >>> import numpy as np - >>> import seg1d - - >>> #create an array of data - >>> x = np.linspace(-np.pi*2, np.pi*2, 500) - >>> #get an array of data from a sin function - >>> targ = np.sin(x) - - >>> #Make an instance of the segmenter - >>> s = seg1d.Segmenter() - >>> #set scaling parameters - >>> s.minW,s.maxW,s.step = 98, 105, 1 - >>> #Set target and reference data - >>> s.set_target(targ) - - >>> #define a segment within the sine wave to use as reference - >>> s.add_reference(targ[75:100]) - >>> #call the segmentation algorithm - >>> segments = s.segment() - >>> np.around(segments, decimals=7) - array([[ 75. , 100. , 1. ], - [324. , 348. , 0.9999992]]) - - >>> s.t_segments - [{'0': array([0.94988243, 0.94170965, 0.93293968, 0.92357809, 0.91363079, - 0.90310412, 0.89200474, 0.88033969, 0.86811636, 0.85534252, - 0.84202625, 0.82817601, 0.81380058, 0.79890907, 0.78351093, - 0.76761592, 0.75123412, 0.73437593, 0.71705202, 0.6992734 , - 0.68105132, 0.66239735, 0.64332332, 0.62384133, 0.60396372])}, {'0': array([0.95374324, 0.94587102, 0.93739898, 0.92833248, 0.91867727, - 0.90843947, 0.89762559, 0.88624247, 0.87429733, 0.86179776, - 0.84875167, 0.83516734, 0.82105338, 0.80641875, 0.79127273, - 0.77562491, 0.75948523, 0.74286391, 0.72577151, 0.70821885, - 0.69021707, 0.67177759, 0.6529121 , 0.63363256])}] - ''' - - self.tSeg = [] - - for c in self.clusters: - self.tSeg.append({ x: y[c[0]:c[1]] for x, y in self.t.items() }) - - return self.tSeg - - - def _process_params(self): - ''' Processes parameters - - If sizes for scaling are not set, uses min,max,step parameter. - If no weights were set, a default of 1 (no weighting) will be applied. 
- - ''' - - self._ref_size() - - if len(self.w.keys()) == 0: self.w = {x: 1 for x in self.t.keys()} - - self.wF = set(self.w.keys()) - self.tF = set(self.t.keys()) - - for _r in self.r: self.rF.update(_r.keys()) - - self._interp_ref() - - self.tLen = len(list(self.t.values())[0]) - - if len(self.wSizes) == 0: - self._set_scales() - - self._check_compliance() - - - def _check_compliance(self): - ''' Checks data formats and parameters for compliance with - segmentation methods - ''' - - assert 0 not in self.wSizes, "Scaling parameters cannot have 0" - assert self.minW < self.maxW, "Minimum scaling must be less than Maximum" - assert len(self.r) > 0, "Must have at least one reference" - assert len(self.t.values()) > 0, "Must have one target" - assert self.wF.issubset(self.tF), "All weights must exist in target data" - assert self.wF.issubset(self.rF), "All weights must exist in reference data" - - def _ref_size(self): - ''' Find the max length of all reference data - ''' - - def d(x): return max([ len(y) for y in x.values() ]) - def a(x): return max([ d(y) for y in x ]) - - self._maxR = a(self.r) - self.rLen = a(self.r) - - def _set_scales(self): - ''' Sets the window scaling sizes based on the min and - max percent with the step size - ''' - - # define steps for data scaling based on percentage - wScale = range(self.minW, self.maxW+1, self.step) - self.wSizes = set([ int( self.rLen* (x/100.0) ) for x in wScale ]) - - def _interp_ref(self): - ''' Resamples reference data to match the same length - ''' - - _r = deepcopy(self.r) - - for ref in _r: - for f in ref: - ref[f] = alg.resample(ref[f], self._maxR)[0] - - self.r = _r - - - def set_target(self, t, copy=True): - ''' Sets the target data by overiding any existing target. - If the target is not a dict, it will be converted to one. - - Parameters - ---------- - t : dict or ndarray - | Dictionary containing labeled features as keys and values as 1-D - arrays (must be same size). - | ndarray of dimension 1 will be used as a single feature - for the target. - | ndarray of n-dimensions will use rows as unique features. - - copy : bool, optional - If True, will make a deepcopy of the passed parameter (Default True) - - - Returns - ------- - None - - - See Also - -------- - add_reference : Add a reference item - - - Notes - ----- - This is the recommended method for adding a feature. - You can also set the target directly through the - Attribute *t* by ```Segmenter.t = ``` - however, this method ensures the data labels and length or stored - properly. - Setting *t* directly must be done with a dictionary. - - - Examples - -------- - - Target data can be set to a single numpy array. - - >>> import numpy as np - >>> import seg1d - >>> - >>> s = seg1d.Segmenter() - >>> t = np.linspace(0,1,4) - >>> s.set_target(t) - >>> s.t - {'0': array([0. , 0.33333333, 0.66666667, 1. ])} - - Alternatively, you can pass a 2-dimensional array representing - multiple features. - - >>> s = seg1d.Segmenter() - >>> t = np.linspace(0,1,6).reshape(2,3) - >>> s.set_target(t) - >>> s.t - {'0': array([0. , 0.2, 0.4]), '1': array([0.6, 0.8, 1. 
])} - - ''' - - assert isinstance(t, (dict, list, np.ndarray)), \ - 'Target must be Dict, List, or Array' - - if copy: - t = deepcopy(t) - self.t = deepcopy(self.t) - - if not isinstance(t, dict): - - if t.ndim == 1: - self.t = {'0': t} - - else: - _tD = {} - for i, _t in enumerate(t): - - _tD[str(i)] = _t - - self.t = _tD - - else: self.t = t - - self.tLen = len(list( self.t.values() )[0]) - self.tF = set(self.t.keys()) - - - def add_reference(self, r, copy=True): - ''' Appends a reference containing one or more features to the existing - reference dataset. - If the reference is not a dict, it will be converted to one. - If this should be the only reference set, use ``clear_reference()`` - before calling this method. - - - Parameters - ---------- - r : dict or ndarray - | Dictionary containing labeled features as keys and values as - 1-D arrays (must be same size). - | ndarray of dimension 1 will be used as a single feature for the - reference. - | ndarray of n-dimensions will use rows as unique features. - - copy : bool, optional - If True, will make a deepcopy of the passed parameter - (Default True). - - - See Also - -------- - set_target : Set the target data - clear_reference: Clear the current reference data - - - Notes - -------- - This method allows features that are not in previous references to be - added, and vice-versa. - It will also allow different sizes of reference data to be added. - This is done as you can explicitly declare which features to use when - segmenting. - - - Examples - -------- - - Add a reference with multiple features - - >>> import seg1d - >>> import numpy as np - >>> - >>> s = seg1d.Segmenter() - >>> r = np.linspace(0,1,6).reshape(2,3) - >>> s.add_reference( r ) - >>> s.r - [{'0': array([0. , 0.2, 0.4]), '1': array([0.6, 0.8, 1. ])}] - - Alternatively, each row of the array can be added as the same labeled - feature for different references by calling this method in a loop. - Notice this is now an array of dictionaries containing the same - feature label. - - >>> s = seg1d.Segmenter() - >>> r = np.linspace(0,1,6).reshape(2,3) - >>> for _r in r: s.add_reference(_r) - >>> s.r - [{'0': array([0. , 0.2, 0.4])}, {'0': array([0.6, 0.8, 1. ])}] - - - ''' - - assert isinstance(r, (dict, np.ndarray)), \ - 'Reference must be Dict or Array' - - if copy: - r = deepcopy(r) - self.r = deepcopy(self.r) - - if not isinstance(r, dict): - - if r.ndim == 1: - self.r.append({'0': r}) - self.rF.add('0') - - else: - _rD = {} - for i, _r in enumerate(r): - - _rD[str(i)] = _r - - self.r.append(_rD) - self.rF.update(_rD.keys()) - - else: - self.r.append(r) - self.rF.update(r.keys()) - - - def clear_reference(self): - ''' Removes any reference data currently assigned - - Parameters - ---------- - None - - - Returns - ------- - None - - - See Also - -------- - add_reference: Add a reference item - - - Notes - ----- - This method also clears the `rF`, and `rLen` attributes. - - Examples - -------- - >>> import numpy as np - >>> import seg1d - >>> - >>> s = seg1d.Segmenter() - >>> s.add_reference( np.linspace(0,3,3) ) - >>> s.r - [{'0': array([0. , 1.5, 3. 
])}] - >>> s.clear_reference() - >>> s.r - [] - - ''' - - self.r = [] - self.rF = set() - self.rLen = 0 - self._maxR = 0 - - def segment(self): - ''' Method to run the segmentation algorithm on the current - Segmenter instance - - Parameters - ---------- - - None - - - Returns - ------- - - *3 x n* array - segments of form - ``[start of segment,end of segment,correlation score]`` - - - Examples - -------- - - This example is the same as the main ``Segmenter`` class as it is the - interface method. - - >>> import seg1d - >>> - >>> #Make an instance of the segmenter - >>> s = seg1d.Segmenter() - >>> - >>> #retrieve the sample reference, target, and weight data - >>> s.r,s.t,s.w = seg1d.sampleData() - >>> - >>> #set the parameters - >>> s.minW,s.maxW,s.step = 70, 150, 1 - >>> - >>> s.segment() - [[207, 240, 0.9124223704844657], [342, 381, 0.880190111545897], [72, 112, 0.8776795468035664]] - - ''' - - self._process_params() # generate missing parameters - - return self.clusters - - -def segment_data(r, t, w, minS, maxS, step): - ''' Segmentation manager for interfacing with Segmenter class - - Find segments of a reference dataset in a target dataset using - a rolling correlation of *n* number of reference examples with - a peak detection applied to the average of *m* reference features - with weights applied to each feature. - - Parameters - ---------- - r : List[Dict[key,numpy.array]] - reference data of form - ``[ {(feature Key): [data array] }, {(feature Key): [data array] } ]`` - - t : Dict[key,numpy.array] - target data of form - ``{ (feature Key): [data array] }`` - - w : Dict[key,float] or None - Weights of form - ``{ (feature key):float,(feature key):float }`` - - minS : int - Minimum scale to apply for reference data - - maxS : int - Maximum scale to apply for reference data - - step : int - Size of step to use in rolling correlation - - Returns - ------- - *3 x n* array - segments of form - ``[start of segment,end of segment,correlation score]`` - - Examples - -------- - First we import sample data from the examples folder that has multiple - features derived from motion capture data - - >>> import seg1d - >>> r,t,w = seg1d.sampleData() - - Then we define some segmentation parameters such as the scaling percentage - of the reference data and index stepping to use in rolling correlation - - >>> minW = 70 # percent to scale down reference data - >>> maxW = 150 # percent to scale up reference data - >>> step = 1 #step to use for correlating reference to target data - - Finally we call the segmentation algorithm - - >>> seg1d.segment_data(r,t,w,minW,maxW,step) - [[207, 240, 0.9124223704844657], [342, 381, 0.880190111545897], [72, 112, 0.8776795468035664]] - - - ''' - - # Make an instance of the segmenter - s = Segmenter() - - # set the parameters - s.minW = minS - s.maxW = maxS - s.step = step - s.t = t - s.r = r - s.w = w - - # return the segments created by the Segmenter - return s.segment() diff --git a/dist/seg1d-0.1.0-py3-none-any.whl b/dist/seg1d-0.1.0-py3-none-any.whl new file mode 100644 index 0000000..75ee450 Binary files /dev/null and b/dist/seg1d-0.1.0-py3-none-any.whl differ diff --git a/dist/seg1d-0.1.0.tar.gz b/dist/seg1d-0.1.0.tar.gz new file mode 100644 index 0000000..115ae76 Binary files /dev/null and b/dist/seg1d-0.1.0.tar.gz differ diff --git a/docs/build/doctest/output.txt b/docs/build/doctest/output.txt index a58e0bc..470dbc2 100644 --- a/docs/build/doctest/output.txt +++ b/docs/build/doctest/output.txt @@ -1,6 +1,14 @@ -Results of doctest builder run on 2020-05-04 
19:15:02 +Results of doctest builder run on 2020-05-04 19:29:53 ===================================================== +Document: generated/seg1d.algorithm.cluster +------------------------------------------- +1 items passed all tests: + 8 tests in default +8 tests in 1 items. +8 passed and 0 failed. +Test passed. + Document: generated/seg1d.Segmenter.clear_reference --------------------------------------------------- 1 items passed all tests: @@ -9,12 +17,20 @@ Document: generated/seg1d.Segmenter.clear_reference 7 passed and 0 failed. Test passed. -Document: generated/seg1d.optimized_funcs.rcor +Document: api_ecg +----------------- +1 items passed all tests: + 25 tests in default +25 tests in 1 items. +25 passed and 0 failed. +Test passed. + +Document: generated/seg1d.Segmenter.t_segments ---------------------------------------------- 1 items passed all tests: - 5 tests in default -5 tests in 1 items. -5 passed and 0 failed. + 11 tests in default +11 tests in 1 items. +11 passed and 0 failed. Test passed. Document: generated/seg1d.optimized_funcs.vcor @@ -33,54 +49,6 @@ Document: generated/seg1d.Segmenter.add_reference 10 passed and 0 failed. Test passed. -Document: generated/seg1d.Segmenter.t_segments ----------------------------------------------- -1 items passed all tests: - 11 tests in default -11 tests in 1 items. -11 passed and 0 failed. -Test passed. - -Document: generated/seg1d.algorithm.resample --------------------------------------------- -1 items passed all tests: - 6 tests in default -6 tests in 1 items. -6 passed and 0 failed. -Test passed. - -Document: api_basic -------------------- -1 items passed all tests: - 13 tests in default -13 tests in 1 items. -13 passed and 0 failed. -Test passed. - -Document: api_ecg ------------------ -1 items passed all tests: - 25 tests in default -25 tests in 1 items. -25 passed and 0 failed. -Test passed. - -Document: generated/seg1d.Segmenter ------------------------------------ -1 items passed all tests: - 6 tests in default -6 tests in 1 items. -6 passed and 0 failed. -Test passed. - -Document: generated/seg1d.algorithm.cluster -------------------------------------------- -1 items passed all tests: - 8 tests in default -8 tests in 1 items. -8 passed and 0 failed. -Test passed. - Document: api_feat ------------------ 1 items passed all tests: @@ -89,7 +57,15 @@ Document: api_feat 17 passed and 0 failed. Test passed. -Document: generated/seg1d.algorithm.uniques +Document: generated/seg1d.optimized_funcs.rcor +---------------------------------------------- +1 items passed all tests: + 5 tests in default +5 tests in 1 items. +5 passed and 0 failed. +Test passed. + +Document: generated/seg1d.Segmenter.segment ------------------------------------------- 1 items passed all tests: 5 tests in default @@ -97,12 +73,12 @@ Document: generated/seg1d.algorithm.uniques 5 passed and 0 failed. Test passed. -Document: api_tune ------------------- +Document: generated/seg1d.algorithm.combine_corr +------------------------------------------------ 1 items passed all tests: - 29 tests in default -29 tests in 1 items. -29 passed and 0 failed. + 11 tests in default +11 tests in 1 items. +11 passed and 0 failed. Test passed. Document: start @@ -138,16 +114,24 @@ Got: 37 passed and 1 failed. ***Test Failed*** 1 failures. -Document: generated/seg1d.algorithm.rolling_corr ------------------------------------------------- +Document: api_tune +------------------ +1 items passed all tests: + 29 tests in default +29 tests in 1 items. +29 passed and 0 failed. 
+Test passed. + +Document: code_simple +--------------------- 1 items passed all tests: 6 tests in default 6 tests in 1 items. 6 passed and 0 failed. Test passed. -Document: code_simple ---------------------- +Document: generated/seg1d.algorithm.get_peaks +--------------------------------------------- 1 items passed all tests: 6 tests in default 6 tests in 1 items. @@ -162,7 +146,7 @@ Document: generated/seg1d.Segmenter.set_target 10 passed and 0 failed. Test passed. -Document: generated/seg1d.Segmenter.segment +Document: generated/seg1d.algorithm.uniques ------------------------------------------- 1 items passed all tests: 5 tests in default @@ -170,16 +154,32 @@ Document: generated/seg1d.Segmenter.segment 5 passed and 0 failed. Test passed. -Document: generated/seg1d.algorithm.combine_corr +Document: generated/seg1d.Segmenter +----------------------------------- +1 items passed all tests: + 6 tests in default +6 tests in 1 items. +6 passed and 0 failed. +Test passed. + +Document: generated/seg1d.algorithm.rolling_corr ------------------------------------------------ 1 items passed all tests: - 11 tests in default -11 tests in 1 items. -11 passed and 0 failed. + 6 tests in default +6 tests in 1 items. +6 passed and 0 failed. Test passed. -Document: generated/seg1d.algorithm.get_peaks ---------------------------------------------- +Document: api_basic +------------------- +1 items passed all tests: + 13 tests in default +13 tests in 1 items. +13 passed and 0 failed. +Test passed. + +Document: generated/seg1d.algorithm.resample +-------------------------------------------- 1 items passed all tests: 6 tests in default 6 tests in 1 items. diff --git a/docs/build/doctrees/api_tune.doctree b/docs/build/doctrees/api_tune.doctree index 9784dd4..4fb3e22 100644 Binary files a/docs/build/doctrees/api_tune.doctree and b/docs/build/doctrees/api_tune.doctree differ diff --git a/docs/build/doctrees/environment.pickle b/docs/build/doctrees/environment.pickle index 8e36d0d..c2cd9a7 100644 Binary files a/docs/build/doctrees/environment.pickle and b/docs/build/doctrees/environment.pickle differ diff --git a/docs/build/html/.buildinfo b/docs/build/html/.buildinfo index 6f2c315..168f534 100644 --- a/docs/build/html/.buildinfo +++ b/docs/build/html/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 270d5ab768ae1f1c18327956dcb8ab7c +config: 3738bd74e6db0940b6bf0884054caf01 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/build/html/_static/documentation_options.js b/docs/build/html/_static/documentation_options.js index 962831a..be2ec8f 100644 --- a/docs/build/html/_static/documentation_options.js +++ b/docs/build/html/_static/documentation_options.js @@ -1,6 +1,6 @@ var DOCUMENTATION_OPTIONS = { URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '0.0.16', + VERSION: '0.1.0', LANGUAGE: 'None', COLLAPSE_INDEX: false, BUILDER: 'html', diff --git a/docs/build/html/api.html b/docs/build/html/api.html index 03e7cbd..364e30f 100644 --- a/docs/build/html/api.html +++ b/docs/build/html/api.html @@ -4,7 +4,7 @@ - API Examples — seg1d 0.0.16 documentation + API Examples — seg1d 0.1.0 documentation @@ -14,7 +14,7 @@ - + @@ -50,7 +50,7 @@
[The remaining docs/build/html changes are the rebuilt Sphinx pages (api, api_basic, api_ecg, api_feat, api_tune, code, code_simple, community, genindex, index, install, routines, search, segmenter, segmenter_meth, start, and the generated/seg1d.* API reference pages); each hunk only replaces "seg1d 0.0.16 documentation" with "seg1d 0.1.0 documentation" in the page title and breadcrumb navigation. The regenerated plot sources api_tune-3.py and api_tune-4.py, and the matching doctest listings in api_tune.html, pick up the whitespace realignment of the label='Segment {}'.format(seg_num) continuation line, and the rebuilt plot PDFs under docs/build/html (api_basic, api_ecg, api_feat, api_tune, start) are binary changes.]
  • diff --git a/docs/build/plot_directive/api_basic-1.pdf b/docs/build/plot_directive/api_basic-1.pdf index 1c32d6d..349d0ef 100644 Binary files a/docs/build/plot_directive/api_basic-1.pdf and b/docs/build/plot_directive/api_basic-1.pdf differ diff --git a/docs/build/plot_directive/api_ecg-1.pdf b/docs/build/plot_directive/api_ecg-1.pdf index a38bcd9..94421d3 100644 Binary files a/docs/build/plot_directive/api_ecg-1.pdf and b/docs/build/plot_directive/api_ecg-1.pdf differ diff --git a/docs/build/plot_directive/api_ecg-2.pdf b/docs/build/plot_directive/api_ecg-2.pdf index 9e1fbe8..536cdca 100644 Binary files a/docs/build/plot_directive/api_ecg-2.pdf and b/docs/build/plot_directive/api_ecg-2.pdf differ diff --git a/docs/build/plot_directive/api_ecg-3.pdf b/docs/build/plot_directive/api_ecg-3.pdf index 0660855..f6f9813 100644 Binary files a/docs/build/plot_directive/api_ecg-3.pdf and b/docs/build/plot_directive/api_ecg-3.pdf differ diff --git a/docs/build/plot_directive/api_ecg-4.pdf b/docs/build/plot_directive/api_ecg-4.pdf index d89adaf..8113b60 100644 Binary files a/docs/build/plot_directive/api_ecg-4.pdf and b/docs/build/plot_directive/api_ecg-4.pdf differ diff --git a/docs/build/plot_directive/api_feat-1.pdf b/docs/build/plot_directive/api_feat-1.pdf index 8a8f5b2..78c7106 100644 Binary files a/docs/build/plot_directive/api_feat-1.pdf and b/docs/build/plot_directive/api_feat-1.pdf differ diff --git a/docs/build/plot_directive/api_feat-2.pdf b/docs/build/plot_directive/api_feat-2.pdf index b8a66de..2b1a3e0 100644 Binary files a/docs/build/plot_directive/api_feat-2.pdf and b/docs/build/plot_directive/api_feat-2.pdf differ diff --git a/docs/build/plot_directive/api_feat-3.pdf b/docs/build/plot_directive/api_feat-3.pdf index fa506d1..0bdaf64 100644 Binary files a/docs/build/plot_directive/api_feat-3.pdf and b/docs/build/plot_directive/api_feat-3.pdf differ diff --git a/docs/build/plot_directive/api_feat-4.pdf b/docs/build/plot_directive/api_feat-4.pdf index 7efce42..c4fba32 100644 Binary files a/docs/build/plot_directive/api_feat-4.pdf and b/docs/build/plot_directive/api_feat-4.pdf differ diff --git a/docs/build/plot_directive/api_tune-1.pdf b/docs/build/plot_directive/api_tune-1.pdf index 98a46de..1a1a95a 100644 Binary files a/docs/build/plot_directive/api_tune-1.pdf and b/docs/build/plot_directive/api_tune-1.pdf differ diff --git a/docs/build/plot_directive/api_tune-2.pdf b/docs/build/plot_directive/api_tune-2.pdf index 2e5b848..e6c4df7 100644 Binary files a/docs/build/plot_directive/api_tune-2.pdf and b/docs/build/plot_directive/api_tune-2.pdf differ diff --git a/docs/build/plot_directive/api_tune-3.pdf b/docs/build/plot_directive/api_tune-3.pdf index 4822d0d..a27e592 100644 Binary files a/docs/build/plot_directive/api_tune-3.pdf and b/docs/build/plot_directive/api_tune-3.pdf differ diff --git a/docs/build/plot_directive/api_tune-4.pdf b/docs/build/plot_directive/api_tune-4.pdf index d75d285..9b43a3c 100644 Binary files a/docs/build/plot_directive/api_tune-4.pdf and b/docs/build/plot_directive/api_tune-4.pdf differ diff --git a/docs/build/plot_directive/start-1.pdf b/docs/build/plot_directive/start-1.pdf index 76578db..241ad7a 100644 Binary files a/docs/build/plot_directive/start-1.pdf and b/docs/build/plot_directive/start-1.pdf differ diff --git a/docs/build/plot_directive/start-2.pdf b/docs/build/plot_directive/start-2.pdf index 2b59d99..6b1dbaa 100644 Binary files a/docs/build/plot_directive/start-2.pdf and b/docs/build/plot_directive/start-2.pdf differ diff --git 
a/seg1d.egg-info/PKG-INFO b/seg1d.egg-info/PKG-INFO index 1bc8f90..5b03b1d 100644 --- a/seg1d.egg-info/PKG-INFO +++ b/seg1d.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: seg1d -Version: 0.0.16 +Version: 0.1.0 Summary: Automated one-dimensional subsequence segmentation Home-page: https://github.com/cadop/seg1d Author: Mathew Schwartz @@ -17,6 +17,7 @@ Description: # seg1d ![seg1d](https://raw.githubusercontent.com/cadop/seg1d/master/docs/build/plot_directive/api_basic-1.png) + Example of the segmentation algorithm using a portion of a sine wave. The initial reference segment is found, along with any additional segments. ### Documentation diff --git a/seg1d/_about.py b/seg1d/_about.py index 39d352f..541f859 100644 --- a/seg1d/_about.py +++ b/seg1d/_about.py @@ -1 +1 @@ -__version__ = '0.0.17' \ No newline at end of file +__version__ = '0.1.0' \ No newline at end of file diff --git a/seg1d/examples/ex_sine_noise.py b/seg1d/examples/ex_sine_noise.py index feeab78..0dfb53b 100644 --- a/seg1d/examples/ex_sine_noise.py +++ b/seg1d/examples/ex_sine_noise.py @@ -99,7 +99,7 @@ ... st = seg[0] ... e = seg[1] ... plt.plot(x[st:e], targ[st:e],dashes=[1,1],linewidth=2,alpha=0.8, #doctest: +SKIP - ... label='Segment {}'.format(seg_num)) #doctest: +SKIP + ... label='Segment {}'.format(seg_num)) #doctest: +SKIP ... seg_num += 1 >>> plt.xlabel('Angle [rad]')#doctest: +SKIP >>> plt.ylabel('sin(x)')#doctest: +SKIP @@ -178,13 +178,14 @@ ... st = seg[0] ... e = seg[1] ... plt.plot(x[st:e], targ[st:e],dashes=[1,1],linewidth=2,alpha=0.8, #doctest: +SKIP - ... label='Segment {}'.format(seg_num)) #doctest: +SKIP + ... label='Segment {}'.format(seg_num)) #doctest: +SKIP ... seg_num += 1 >>> plt.xlabel('Angle [rad]')#doctest: +SKIP >>> plt.ylabel('sin(x)')#doctest: +SKIP >>> plt.legend()#doctest: +SKIP >>> plt.tight_layout()#doctest: +SKIP >>> plt.show()#doctest: +SKIP + .. plot:: :context: close-figs