delinting, black (#214)
* delinting, black

* Update src/qp/factory.py

Sadly, we don't manage pdfs

Co-authored-by: Melissa DeLucchi <[email protected]>

---------

Co-authored-by: Melissa DeLucchi <[email protected]>
eacharles and delucchi-cmu committed Nov 29, 2023
1 parent 2258988 commit c968675
Showing 54 changed files with 1,980 additions and 1,269 deletions.
1 change: 1 addition & 0 deletions pyproject.toml
@@ -94,6 +94,7 @@ disable = [
"duplicate-code",
"use-dict-literal",
"broad-exception-caught",
"consider-using-f-string",
]
max-line-length = 110
max-locals = 50
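The new consider-using-f-string entry matters for the changes below: pylint emits that message for old-style "%" interpolation, and this codebase deliberately builds np.vectorize signature strings with "%" formatting (see conversion_funcs.py further down), so the check is disabled globally rather than rewriting those call sites. A minimal sketch of the pattern involved; the variable names here are illustrative, not from the repo:

import numpy as np  # noqa: F401  (context: the repo builds numpy signatures this way)

# Illustrative only: the "%" interpolation that pylint's
# consider-using-f-string check would flag without this disable.
n_in, n_out = 1000, 99
signature = "(%i)->(%i)" % (n_in, n_out)  # flagged: consider-using-f-string
signature_f = f"({n_in})->({n_out})"      # the rewrite pylint would suggest
assert signature == signature_f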
14 changes: 13 additions & 1 deletion src/qp/__init__.py
@@ -12,7 +12,19 @@
from .scipy_pdfs import *
from .packed_interp_pdf import *
from .ensemble import Ensemble
- from .factory import instance, add_class, create, read, read_metadata, convert, concatenate, iterator, data_length, from_tables, is_qp_file
+ from .factory import (
+ instance,
+ add_class,
+ create,
+ read,
+ read_metadata,
+ convert,
+ concatenate,
+ iterator,
+ data_length,
+ from_tables,
+ is_qp_file,
+ )
from .lazy_modules import *

from . import utils
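For context on the hunk above: the single-line factory import exceeded the configured line length, so black rewrote it as a parenthesized import with one name per line and a magic trailing comma. The same rewrite can be reproduced with black's Python API; a minimal sketch, assuming black is installed (format_str and Mode are its documented entry points, and line_length=110 here mirrors the pylint max-line-length setting, which may differ from the project's actual black configuration):

import black

SRC = (
    "from .factory import instance, add_class, create, read, read_metadata, "
    "convert, concatenate, iterator, data_length, from_tables, is_qp_file\n"
)
# black splits the over-long import into a parenthesized block
# with a trailing comma once it exceeds the line length.
print(black.format_str(SRC, mode=black.Mode(line_length=110)))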
125 changes: 68 additions & 57 deletions src/qp/conversion_funcs.py
@@ -7,8 +7,11 @@
from scipy import interpolate as sciinterp

from .lazy_modules import mixture
- from .sparse_rep import (build_sparse_representation, decode_sparse_indices,
- indices2shapes)
+ from .sparse_rep import (
+ build_sparse_representation,
+ decode_sparse_indices,
+ indices2shapes,
+ )


def extract_vals_at_x(in_dist, **kwargs):
@@ -29,8 +32,8 @@ def extract_vals_at_x(in_dist, **kwargs):
data : `dict`
The extracted data
"""
- xvals = kwargs.pop('xvals', None)
- if xvals is None: # pragma: no cover
+ xvals = kwargs.pop("xvals", None)
+ if xvals is None:  # pragma: no cover
raise ValueError("To convert to extract_xy_vals you must specify xvals")
yvals = in_dist.pdf(xvals)
return dict(xvals=xvals, yvals=yvals)
@@ -54,8 +57,8 @@ def extract_xy_vals(in_dist, **kwargs):
data : `dict`
The extracted data
"""
- xvals = kwargs.pop('xvals', None)
- if xvals is None: # pragma: no cover
+ xvals = kwargs.pop("xvals", None)
+ if xvals is None:  # pragma: no cover
raise ValueError("To convert using extract_xy_vals you must specify xvals")
yvals = in_dist.pdf(xvals)
expand_x = np.ones(yvals.shape) * np.squeeze(xvals)
@@ -80,8 +83,8 @@ def extract_samples(in_dist, **kwargs):
data : `dict`
The extracted data
"""
- samples = in_dist.rvs(size=kwargs.pop('size', 1000))
- xvals = kwargs.pop('xvals')
+ samples = in_dist.rvs(size=kwargs.pop("size", 1000))
+ xvals = kwargs.pop("xvals")
return dict(samples=samples, xvals=xvals, yvals=None)


@@ -103,8 +106,8 @@ def extract_hist_values(in_dist, **kwargs):
data : `dict`
The extracted data
"""
- bins = kwargs.pop('bins', None)
- if bins is None: # pragma: no cover
+ bins = kwargs.pop("bins", None)
+ if bins is None:  # pragma: no cover
raise ValueError("To convert using extract_hist_samples you must specify bins")
bins, pdfs = in_dist.histogramize(bins)
return dict(bins=bins, pdfs=pdfs)
@@ -130,15 +133,18 @@ def extract_hist_samples(in_dist, **kwargs):
data : `dict`
The extracted data
"""
- bins = kwargs.pop('bins', None)
- size = kwargs.pop('size', 1000)
- if bins is None: # pragma: no cover
+ bins = kwargs.pop("bins", None)
+ size = kwargs.pop("size", 1000)
+ if bins is None:  # pragma: no cover
raise ValueError("To convert using extract_hist_samples you must specify bins")
samples = in_dist.rvs(size=size)

def hist_helper(sample):
return np.histogram(sample, bins=bins)[0]
- vv = np.vectorize(hist_helper, signature="(%i)->(%i)" % (samples.shape[0], bins.size-1))
+
+ vv = np.vectorize(
+ hist_helper, signature="(%i)->(%i)" % (samples.shape[0], bins.size - 1)
+ )
pdfs = vv(samples)
return dict(bins=bins, pdfs=pdfs)
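A note on the vectorize idiom reformatted here: np.vectorize with a generalized-ufunc signature maps the helper over the leading axes of its input, treating the trailing axis as the core dimension, and the sizes are interpolated into the signature string with "%" formatting (hence the pylint disable above). A minimal self-contained sketch with illustrative shapes:

import numpy as np

bins = np.linspace(0.0, 1.0, 11)                      # 10 histogram cells
samples = np.random.default_rng(0).random((5, 1000))  # 5 objects, 1000 draws each

def hist_helper(sample):
    return np.histogram(sample, bins=bins)[0]

# Each length-1000 row is one core input; each output row has 10 counts.
vv = np.vectorize(
    hist_helper, signature="(%i)->(%i)" % (samples.shape[1], bins.size - 1)
)
pdfs = vv(samples)
print(pdfs.shape)  # (5, 10)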

@@ -161,14 +167,14 @@ def extract_quantiles(in_dist, **kwargs):
data : `dict`
The extracted data
"""
- quants = kwargs.pop('quants', None)
- if quants is None: # pragma: no cover
+ quants = kwargs.pop("quants", None)
+ if quants is None:  # pragma: no cover
raise ValueError("To convert using extract_quantiles you must specify quants")
locs = in_dist.ppf(quants)
return dict(quants=quants, locs=locs)


- def extract_fit(in_dist, **kwargs): # pragma: no cover
+ def extract_fit(in_dist, **kwargs):  # pragma: no cover
"""Convert to a functional distribution by fitting it to a set of x and y values
Parameters
Expand All @@ -186,9 +192,9 @@ def extract_fit(in_dist, **kwargs): # pragma: no cover
data : `dict`
The extracted data
"""
- raise NotImplementedError('extract_fit')
- #xvals = kwargs.pop('xvals', None)
- #if xvals is None:
+ raise NotImplementedError("extract_fit")
+ # xvals = kwargs.pop('xvals', None)
+ # if xvals is None:
# raise ValueError("To convert using extract_fit you must specify xvals")
##vals = in_dist.pdf(xvals)

@@ -215,10 +221,11 @@ def extract_mixmod_fit_samples(in_dist, **kwargs):
data : `dict`
The extracted data
"""
- n_comps = kwargs.pop('ncomps', 3)
- n_sample = kwargs.pop('nsamples', 1000)
- random_state = kwargs.pop('random_state', None)
+ n_comps = kwargs.pop("ncomps", 3)
+ n_sample = kwargs.pop("nsamples", 1000)
+ random_state = kwargs.pop("random_state", None)
samples = in_dist.rvs(size=n_sample, random_state=random_state)
+
def mixmod_helper(samps):
estimator = mixture.GaussianMixture(n_components=n_comps)
estimator.fit(samps.reshape(-1, 1))
@@ -230,9 +237,12 @@ def mixmod_helper(samps):

vv = np.vectorize(mixmod_helper, signature="(%i)->(3,%i)" % (n_sample, n_comps))
fit_vals = vv(samples)
- return dict(weights=fit_vals[:, 0, :], means=fit_vals[:, 1, :], stds=fit_vals[:, 2, :])
+ return dict(
+ weights=fit_vals[:, 0, :], means=fit_vals[:, 1, :], stds=fit_vals[:, 2, :]
+ )
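For readers tracing mixmod_helper above: the lazily imported mixture module is assumed here to be sklearn.mixture (an inference from the GaussianMixture usage, not stated in this diff), and each object's sample vector is fit as a one-dimensional Gaussian mixture. A minimal standalone sketch of that fit:

import numpy as np
from sklearn import mixture  # assumed backing module for qp's lazy "mixture"

rng = np.random.default_rng(42)
# Illustrative 1-D draws from two well-separated components
samps = np.concatenate([rng.normal(0.0, 1.0, 500), rng.normal(5.0, 0.5, 500)])

estimator = mixture.GaussianMixture(n_components=2)
estimator.fit(samps.reshape(-1, 1))  # sklearn wants shape (n_samples, n_features)

weights = estimator.weights_
means = estimator.means_.ravel()
stds = np.sqrt(estimator.covariances_.ravel())  # 1-D case: covariances are variances
print(weights, means, stds)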

- def extract_voigt_mixmod(in_dist, **kwargs): #pragma: no cover
+
+ def extract_voigt_mixmod(in_dist, **kwargs):  # pragma: no cover
"""Convert to a voigt mixture model starting with a gaussian mixture model,
trivially by setting gammas to 0
Expand All @@ -247,14 +257,14 @@ def extract_voigt_mixmod(in_dist, **kwargs): #pragma: no cover
The extracted data
"""
objdata = in_dist.objdata()
- means = objdata['means']
- stds = objdata['stds']
- weights = objdata['weights']
+ means = objdata["means"]
+ stds = objdata["stds"]
+ weights = objdata["weights"]
gammas = np.zeros_like(means)
return dict(means=means, stds=stds, weights=weights, gammas=gammas, **kwargs)
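The zero-filled gammas above rely on a property of the Voigt profile: with its Lorentzian width gamma set to 0, it reduces to a pure Gaussian, so a Gaussian mixture converts trivially. A quick check of that identity with scipy (scipy.special.voigt_profile is available in scipy >= 1.4):

import numpy as np
from scipy.special import voigt_profile
from scipy.stats import norm

x = np.linspace(-3.0, 3.0, 7)
sigma = 1.0
# gamma = 0 collapses the Voigt profile to the Gaussian pdf
np.testing.assert_allclose(voigt_profile(x, sigma, 0.0), norm.pdf(x, scale=sigma))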


- def extract_voigt_xy(in_dist, **kwargs): #pragma: no cover
+ def extract_voigt_xy(in_dist, **kwargs):  # pragma: no cover
"""Build a voigt function basis and run a match-pursuit algorithm to fit gridded data
Parameters
Expand All @@ -269,14 +279,14 @@ def extract_voigt_xy(in_dist, **kwargs): #pragma: no cover
"""

sparse_results = extract_voigt_xy_sparse(in_dist, **kwargs)
- indices = sparse_results['indices']
- meta = sparse_results['metadata']
+ indices = sparse_results["indices"]
+ meta = sparse_results["metadata"]

w, m, s, g = indices2shapes(indices, meta)
return dict(means=m, stds=s, weights=w, gammas=g)


- def extract_voigt_xy_sparse(in_dist, **kwargs): #pragma: no cover
+ def extract_voigt_xy_sparse(in_dist, **kwargs):  # pragma: no cover
"""Build a voigt function basis and run a match-pursuit algorithm to fit gridded data
Parameters
Expand All @@ -290,11 +300,11 @@ def extract_voigt_xy_sparse(in_dist, **kwargs): #pragma: no cover
The extracted data as shaped parameters means, stds, weights, gammas
"""

- yvals = in_dist.objdata()['yvals']
+ yvals = in_dist.objdata()["yvals"]

- default = in_dist.metadata()['xvals'][0]
- z = kwargs.pop('xvals', default)
- nz = kwargs.pop('nz', 300)
+ default = in_dist.metadata()["xvals"][0]
+ z = kwargs.pop("xvals", default)
+ nz = kwargs.pop("nz", 300)

minz = np.min(z)
_, j = np.where(yvals > 0)
@@ -306,7 +316,8 @@ def extract_voigt_xy_sparse(in_dist, **kwargs): #pragma: no cover
ALL, bigD, _ = build_sparse_representation(newz, newpdf)
return dict(indices=ALL, metadata=bigD)

- def extract_sparse_from_xy(in_dist, **kwargs): #pragma: no cover
+
+ def extract_sparse_from_xy(in_dist, **kwargs):  # pragma: no cover
"""Extract sparse representation from an xy interpolated representation
Parameters
Expand All @@ -333,26 +344,26 @@ def extract_sparse_from_xy(in_dist, **kwargs): #pragma: no cover
This function will rebin to a grid more suited to the in_dist support by
removing x-values corresponding to y=0
"""
- default = in_dist.objdata()['yvals']
- yvals = kwargs.pop('yvals', default)
- default = in_dist.metadata()['xvals'][0]
- xvals = kwargs.pop('xvals', default)
- nvals = kwargs.pop('nvals', 300)
- #rebin to a grid more suited to the in_dist support
+ default = in_dist.objdata()["yvals"]
+ yvals = kwargs.pop("yvals", default)
+ default = in_dist.metadata()["xvals"][0]
+ xvals = kwargs.pop("xvals", default)
+ nvals = kwargs.pop("nvals", 300)
+ # rebin to a grid more suited to the in_dist support
xmin = np.min(xvals)
_, j = np.where(yvals > 0)
xmax = np.max(xvals[j])
newx = np.linspace(xmin, xmax, nvals)
interp = sciinterp.interp1d(xvals, yvals, assume_sorted=True)
newpdf = interp(newx)
sparse_indices, metadata, _ = build_sparse_representation(newx, newpdf)
- metadata['xvals'] = newx
- metadata['sparse_indices'] = sparse_indices
- metadata.pop('Ntot')
+ metadata["xvals"] = newx
+ metadata["sparse_indices"] = sparse_indices
+ metadata.pop("Ntot")
return metadata


- def extract_xy_sparse(in_dist, **kwargs): #pragma: no cover
+ def extract_xy_sparse(in_dist, **kwargs):  # pragma: no cover
"""Extract xy-interpolated representation from a sparse representation
Parameters
@@ -380,28 +391,28 @@ def extract_xy_sparse(in_dist, **kwargs): #pragma: no cover
removing x-values corresponding to y=0
"""

- yvals = in_dist.objdata()['yvals']
- default = in_dist.metadata()['xvals'][0]
- xvals = kwargs.pop('xvals', default)
- nvals = kwargs.pop('nvals', 300)
- #rebin to a grid more suited to the in_dist support
+ yvals = in_dist.objdata()["yvals"]
+ default = in_dist.metadata()["xvals"][0]
+ xvals = kwargs.pop("xvals", default)
+ nvals = kwargs.pop("nvals", 300)
+ # rebin to a grid more suited to the in_dist support
xmin = np.min(xvals)
_, j = np.where(yvals > 0)
xmax = np.max(xvals[j])
newx = np.linspace(xmin, xmax, nvals)
interp = sciinterp.interp1d(xvals, yvals, assume_sorted=True)
newpdf = interp(newx)
sparse_indices, sparse_meta, A = build_sparse_representation(newx, newpdf)
- #decode the sparse indices into basis indices and weights
+ # decode the sparse indices into basis indices and weights
basis_indices, weights = decode_sparse_indices(sparse_indices)
- #retrieve the weighted array of basis functions for each object
+ # retrieve the weighted array of basis functions for each object
pdf_y = A[:, basis_indices] * weights
- #normalize and sum the weighted pdfs
- x = sparse_meta['z']
+ # normalize and sum the weighted pdfs
+ x = sparse_meta["z"]
y = pdf_y.sum(axis=-1)
norms = sciint.trapz(y.T, x)
y /= norms
- #super(sparse_gen, self).__init__(x, y.T, *args, **kwargs)
+ # super(sparse_gen, self).__init__(x, y.T, *args, **kwargs)
xvals = x
yvals = y.T
return dict(xvals=xvals, yvals=yvals, **kwargs)
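The rebinning idiom shared by the last two functions, trimming the grid to where the pdfs have support and reinterpolating onto nvals points, is easier to see in isolation. A minimal sketch with fabricated data (the threshold and shapes are illustrative):

import numpy as np
from scipy import interpolate as sciinterp

xvals = np.linspace(0.0, 3.0, 301)
yvals = np.exp(-0.5 * ((xvals - 0.8) / 0.1) ** 2)[np.newaxis, :]
yvals[yvals < 1e-6] = 0.0            # emulate a pdf with compact support

xmin = np.min(xvals)
_, j = np.where(yvals > 0)           # grid columns where any pdf is nonzero
xmax = np.max(xvals[j])
newx = np.linspace(xmin, xmax, 300)  # denser grid matched to the support
interp = sciinterp.interp1d(xvals, yvals, assume_sorted=True)
newpdf = interp(newx)                # interpolates along the last axis
print(newpdf.shape)                  # (1, 300)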