Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Unit testing tolerances + more consistent definition of SNR #79

Open
wants to merge 8 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion WrapImage/nifti_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ def loop_over_first_n_minus_1_dimensions(arr):
for idx, view in tqdm(loop_over_first_n_minus_1_dimensions(data), desc=f"{args.algorithm} is fitting", dynamic_ncols=True, total=total_iteration):
fit_result = fit.osipi_fit(view, bvals)
f_image.append(fit_result["f"])
Dp_image.append(fit_result["D*"])
Dp_image.append(fit_result["Dp"])
D_image.append(fit_result["D"])

# Convert lists to NumPy arrays
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -532,7 +532,7 @@
{
"data": {
"text/plain": [
"{'f': array(0.04609779), 'D*': array(0.01136011), 'D': array(0.00071134)}"
"{'f': array(0.04609779), 'Dp': array(0.01136011), 'D': array(0.00071134)}"
]
},
"execution_count": 15,
Expand Down Expand Up @@ -569,7 +569,7 @@
{
"data": {
"text/plain": [
"{'f': array(0.04611801), 'D*': array(0.0113541), 'D': array(0.0007113)}"
"{'f': array(0.04611801), 'Dp': array(0.0113541), 'D': array(0.0007113)}"
]
},
"execution_count": 16,
Expand Down Expand Up @@ -636,10 +636,10 @@
"#plot the results of algorithm 1\n",
"plt.subplot(121)\n",
"plt.plot(np.unique(bval),signal_1dir,'x')\n",
"plt.plot(np.unique(bval),fit['f']*np.exp(-np.unique(bval)*fit['D*'])+(1-fit['f'])*np.exp(-np.unique(bval)*fit['D']))\n",
"plt.plot(np.unique(bval),fit['f']*np.exp(-np.unique(bval)*fit['D*']))\n",
"plt.plot(np.unique(bval),fit['f']*np.exp(-np.unique(bval)*fit['Dp'])+(1-fit['f'])*np.exp(-np.unique(bval)*fit['D']))\n",
"plt.plot(np.unique(bval),fit['f']*np.exp(-np.unique(bval)*fit['Dp']))\n",
"plt.plot(np.unique(bval),(1-fit['f'])*np.exp(-np.unique(bval)*fit['D']))\n",
"plt.legend(['measured data','model fit','D*','D'])\n",
"plt.legend(['measured data','model fit','Dp','D'])\n",
"plt.ylabel('S/S0')\n",
"plt.xlabel('b-value [s/mm^2]')\n",
"plt.title('algorithm 1')\n",
Expand All @@ -650,10 +650,10 @@
"#plot the results of algorithm 2\n",
"plt.subplot(122)\n",
"plt.plot(np.unique(bval),signal_1dir,'x')\n",
"plt.plot(np.unique(bval),fit['f']*np.exp(-np.unique(bval)*fit['D*'])+(1-fit['f'])*np.exp(-np.unique(bval)*fit['D']))\n",
"plt.plot(np.unique(bval),fit['f']*np.exp(-np.unique(bval)*fit['D*']))\n",
"plt.plot(np.unique(bval),fit['f']*np.exp(-np.unique(bval)*fit['Dp'])+(1-fit['f'])*np.exp(-np.unique(bval)*fit['D']))\n",
"plt.plot(np.unique(bval),fit['f']*np.exp(-np.unique(bval)*fit['Dp']))\n",
"plt.plot(np.unique(bval),(1-fit['f'])*np.exp(-np.unique(bval)*fit['D']))\n",
"plt.legend(['measured data','model fit','D*','D'])\n",
"plt.legend(['measured data','model fit','Dp','D'])\n",
"plt.ylabel('S/S0')\n",
"plt.xlabel('b-value [s/mm^2]')\n",
"plt.title('algorithm 2')\n"
Expand Down Expand Up @@ -818,7 +818,7 @@
"data": {
"text/plain": [
"{'f': array([0., 0., 0., ..., 0., 0., 0.]),\n",
" 'D*': array([0., 0., 0., ..., 0., 0., 0.]),\n",
" 'Dp': array([0., 0., 0., ..., 0., 0., 0.]),\n",
" 'D': array([0., 0., 0., ..., 0., 0., 0.])}"
]
},
Expand Down
6 changes: 3 additions & 3 deletions phantoms/MR_XCAT_qMRI/sim_ivim_sig.py
Original file line number Diff line number Diff line change
Expand Up @@ -459,9 +459,9 @@ def parse_bvalues_file(file_path):
signals = np.squeeze(voxels[int(voxels.shape[0] * voxel_selector_fraction)]).tolist()
generic_data[name] = {
'noise': noise,
'D': np.mean(Dim[selector], axis=0),
'f': np.mean(fim[selector], axis=0),
'Dp': np.mean(Dpim[selector], axis=0),
'D': np.median(Dim[selector], axis=0),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Median now?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes — somehow there was some odd rounding happening in np.mean that made values come out about 0.0000000000000001 off. So all round values became D = 0.0029999999999 instead of 0.003, etc.

'f': np.median(fim[selector], axis=0),
'Dp': np.median(Dpim[selector], axis=0),
'data': signals
}
generic_data['config'] = {
Expand Down
45 changes: 23 additions & 22 deletions src/original/OGC_AmsterdamUMC/LSQ_fitting.py
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this algorithm wrapped? I just want to make sure these changes are also made there, if necessary.

Original file line number Diff line number Diff line change
Expand Up @@ -124,29 +124,30 @@ def fit_segmented(bvalues, dw_data, bounds=([0, 0, 0.005],[0.005, 0.7, 0.2]), cu
:return Dp: Fitted Dp
:return S0: Fitted S0
"""
p0 = [p0[0] * 1000, p0[1] * 10, p0[2] * 10, p0[3]]
try:
# determine high b-values and data for D
dw_data=dw_data/np.mean(dw_data[bvalues==0])
high_b = bvalues[bvalues >= cutoff]
high_dw_data = dw_data[bvalues >= cutoff]
# correct the bounds. Note that S0 bounds determine the max and min of f
bounds1 = ([bounds[0][0] * 1000., 0.7 - bounds[1][1]], [bounds[1][0] * 1000., 1.3 - bounds[0][
1]]) # By bounding S0 like this, we effectively insert the boundaries of f
# fit for S0' and D
params, _ = curve_fit(lambda b, Dt, int: int * np.exp(-b * Dt / 1000), high_b, high_dw_data,
p0=(p0[0], p0[3]-p0[1]/10),
bounds1 = ([bounds[0][0], 0], [bounds[1][0], 10000000000])
params, _ = curve_fit(lambda b, Dt, int: int * np.exp(-b * Dt ), high_b, high_dw_data,
p0=(p0[0], p0[3]-p0[1]),
bounds=bounds1)
Dt, Fp = params[0] / 1000, 1 - params[1]
Dt, Fp = 0+params[0], 1 - params[1]
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

0+params[0], why the 0+ part? I see it in a few other places too.

if Fp < bounds[0][1] : Fp = bounds[0][1]
if Fp > bounds[1][1] : Fp = bounds[1][1]

# remove the diffusion part to only keep the pseudo-diffusion
dw_data_remaining = dw_data - (1 - Fp) * np.exp(-bvalues * Dt)
bounds2 = (bounds[0][2]*10, bounds[1][2]*10)
bounds2 = (bounds[0][2], bounds[1][2])
# fit for D*
params, _ = curve_fit(lambda b, Dp: Fp * np.exp(-b * Dp), bvalues, dw_data_remaining, p0=(p0[2]), bounds=bounds2)
Dp = params[0]
return Dt, Fp, Dp
Dp = 0+params[0]
return Dt, np.float64(Fp), Dp
except:
# if fit fails, return zeros
# print('segnetned fit failed')
# print('segmented fit failed')
return 0., 0., 0.


Expand Down Expand Up @@ -235,17 +236,17 @@ def fit_least_squares(bvalues, dw_data, S0_output=False, fitS0=True,
try:
if not fitS0:
# bounds are rescaled such that each parameter changes at roughly the same rate to help fitting.
bounds = ([bounds[0][0] * 1000, bounds[0][1] * 10, bounds[0][2] * 10],
bounds2 = ([bounds[0][0] * 1000, bounds[0][1] * 10, bounds[0][2] * 10],
[bounds[1][0] * 1000, bounds[1][1] * 10, bounds[1][2] * 10])
p0=[p0[0]*1000,p0[1]*10,p0[2]*10]
params, _ = curve_fit(ivimN_noS0, bvalues, dw_data, p0=p0, bounds=bounds)
params, _ = curve_fit(ivimN_noS0, bvalues, dw_data, p0=p0, bounds=bounds2)
S0 = 1
else:
# bounds are rescaled such that each parameter changes at roughly the same rate to help fitting.
bounds = ([bounds[0][0] * 1000, bounds[0][1] * 10, bounds[0][2] * 10, bounds[0][3]],
bounds2 = ([bounds[0][0] * 1000, bounds[0][1] * 10, bounds[0][2] * 10, bounds[0][3]],
[bounds[1][0] * 1000, bounds[1][1] * 10, bounds[1][2] * 10, bounds[1][3]])
p0=[p0[0]*1000,p0[1]*10,p0[2]*10,p0[3]]
params, _ = curve_fit(ivimN, bvalues, dw_data, p0=p0, bounds=bounds)
params, _ = curve_fit(ivimN, bvalues, dw_data, p0=p0, bounds=bounds2)
S0 = params[3]
# correct for the rescaling of parameters
Dt, Fp, Dp = params[0] / 1000, params[1] / 10, params[2] / 10
Expand All @@ -261,7 +262,7 @@ def fit_least_squares(bvalues, dw_data, S0_output=False, fitS0=True,
Dt, Fp, Dp = fit_segmented(bvalues, dw_data, bounds=bounds)
return Dt, Fp, Dp, 1
else:
return fit_segmented(bvalues, dw_data)
return fit_segmented(bvalues, dw_data, bounds=bounds)


def fit_least_squares_array_tri_exp(bvalues, dw_data, S0_output=True, fitS0=True, njobs=4,
Expand Down Expand Up @@ -561,19 +562,19 @@ def neg_log_prior(p):
Dt, Fp, Dp = p[0], p[1], p[2]
# make D*<D very unlikely
if (Dp < Dt):
return 1e3
return 1e10
else:
# determine and return the prior for D, f and D* (and S0)
if len(p) == 4:
if Dt_range[0] < Dt < Dt_range[1] and Fp_range[0] < Fp < Fp_range[1] and Dp_range[0] < Dp < Dp_range[1]: # and S0_range[0] < S0 < S0_range[1]: << not sure whether this helps. Technically it should be here
if Dt_range[0] < Dt < Dt_range[1] and Fp_range[0] < Fp < Fp_range[1] and Dp_range[0] < Dp < Dp_range[1] and S0_range[0] < S0 < S0_range[1]: #<< not sure whether this helps. Technically it should be here
return 0
else:
return 1e3
return 1e10
else:
if Dt_range[0] < Dt < Dt_range[1] and Fp_range[0] < Fp < Fp_range[1] and Dp_range[0] < Dp < Dp_range[1]:
return 0
else:
return 1e3
return 1e10

return neg_log_prior

Expand Down Expand Up @@ -638,7 +639,7 @@ def parfun(i):
return Dt_pred, Fp_pred, Dp_pred, S0_pred


def fit_bayesian(bvalues, dw_data, neg_log_prior, x0=[0.001, 0.2, 0.05, 1], fitS0=True):
def fit_bayesian(bvalues, dw_data, neg_log_prior, x0=[0.001, 0.2, 0.05, 1], fitS0=True, bounds=([0,0,0,0],[0.005,1.5,2,2.5])):
'''
This is an implementation of the Bayesian IVIM fit. It returns the Maximum a posterior probability.
The fit is taken from Barbieri et al. which was initially introduced in http://arxiv.org/10.1002/mrm.25765 and
Expand All @@ -655,7 +656,7 @@ def fit_bayesian(bvalues, dw_data, neg_log_prior, x0=[0.001, 0.2, 0.05, 1], fitS
'''
try:
# define fit bounds
bounds = [(0, 0.005), (0, 1.5), (0, 2), (0, 2.5)]
bounds = [(bounds[0][0], bounds[1][0]), (bounds[0][1], bounds[1][1]), (bounds[0][2], bounds[1][2]), (bounds[0][3], bounds[1][3])]
# Find the Maximum a posterior probability (MAP) by minimising the negative log of the posterior
if fitS0:
params = minimize(neg_log_posterior, x0=x0, args=(bvalues, dw_data, neg_log_prior), bounds=bounds)
Expand Down
10 changes: 7 additions & 3 deletions src/standardized/ETP_SRI_LinearFitting.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
the requirements.
"""
super(ETP_SRI_LinearFitting, self).__init__(bvalues, thresholds, bounds, initial_guess)

if bounds is not None:
print('warning, bounds from wrapper are not (yet) used in this algorithm')
self.use_bounds = False
self.use_initial_guess = False
        # Could be a good idea to have all the submission-specific variables be
        # defined with initials?
self.ETP_weighting = weighting
Expand All @@ -57,6 +60,7 @@ def ivim_fit(self, signals, bvalues=None, linear_fit_option=False, **kwargs):
Returns:
_type_: _description_
"""
signals[signals<0.0000001]=0.0000001
if bvalues is None:
bvalues = self.bvalues

Expand All @@ -70,14 +74,14 @@ def ivim_fit(self, signals, bvalues=None, linear_fit_option=False, **kwargs):
f, Dstar = ETP_object.linear_fit(bvalues, signals)

results["f"] = f
results["D*"] = Dstar
results["Dp"] = Dstar

return results
else:
f, D, Dstar = ETP_object.ivim_fit(bvalues, signals)

results["f"] = f
results["D*"] = Dstar
results["Dp"] = Dstar
results["D"] = D

return results
Expand Down
9 changes: 6 additions & 3 deletions src/standardized/IAR_LU_biexp.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
the requirements.
"""
super(IAR_LU_biexp, self).__init__(bvalues, thresholds, bounds, initial_guess)

if bounds is not None:
print('warning, bounds from wrapper are not (yet) used in this algorithm')
self.use_bounds = False
self.use_initial_guess = False
# Check the inputs

# Initialize the algorithm
Expand Down Expand Up @@ -78,7 +81,7 @@ def ivim_fit(self, signals, bvalues, **kwargs):

results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["Dp"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
Expand Down Expand Up @@ -110,7 +113,7 @@ def ivim_fit_full_volume(self, signals, bvalues, **kwargs):

results = {}
results["f"] = fit_results.model_params[..., 1]
results["D*"] = fit_results.model_params[..., 2]
results["Dp"] = fit_results.model_params[..., 2]
results["D"] = fit_results.model_params[..., 3]

return results
7 changes: 5 additions & 2 deletions src/standardized/IAR_LU_modified_mix.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
the requirements.
"""
super(IAR_LU_modified_mix, self).__init__(bvalues, thresholds, bounds, initial_guess)

if bounds is not None:
print('warning, bounds from wrapper are not (yet) used in this algorithm')
self.use_bounds = False
self.use_initial_guess = False
# Check the inputs

# Initialize the algorithm
Expand Down Expand Up @@ -81,7 +84,7 @@ def ivim_fit(self, signals, bvalues, **kwargs):
#D = fit_results.model_params[3]
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["Dp"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
7 changes: 5 additions & 2 deletions src/standardized/IAR_LU_modified_topopro.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
the requirements.
"""
super(IAR_LU_modified_topopro, self).__init__(bvalues, thresholds, bounds, initial_guess)

if bounds is not None:
print('warning, bounds from wrapper are not (yet) used in this algorithm')
self.use_bounds = False
self.use_initial_guess = False
# Check the inputs

# Initialize the algorithm
Expand Down Expand Up @@ -83,7 +86,7 @@ def ivim_fit(self, signals, bvalues, **kwargs):
#return f, Dstar, D
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["Dp"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
7 changes: 5 additions & 2 deletions src/standardized/IAR_LU_segmented_2step.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
the requirements.
"""
super(IAR_LU_segmented_2step, self).__init__(bvalues, thresholds, bounds, initial_guess)

if bounds is not None:
print('warning, bounds from wrapper are not (yet) used in this algorithm')
self.use_bounds = False
self.use_initial_guess = False
# Check the inputs

# Initialize the algorithm
Expand Down Expand Up @@ -84,7 +87,7 @@ def ivim_fit(self, signals, bvalues, thresholds=None, **kwargs):
#return f, Dstar, D
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["Dp"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
7 changes: 5 additions & 2 deletions src/standardized/IAR_LU_segmented_3step.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
the requirements.
"""
super(IAR_LU_segmented_3step, self).__init__(bvalues, thresholds, bounds, initial_guess)

if bounds is not None:
print('warning, bounds from wrapper are not (yet) used in this algorithm')
self.use_bounds = False
self.use_initial_guess = False
# Check the inputs

# Initialize the algorithm
Expand Down Expand Up @@ -83,7 +86,7 @@ def ivim_fit(self, signals, bvalues, **kwargs):
#return f, Dstar, D
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["Dp"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
7 changes: 5 additions & 2 deletions src/standardized/IAR_LU_subtracted.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ def __init__(self, bvalues=None, thresholds=None, bounds=None, initial_guess=Non
the requirements.
"""
super(IAR_LU_subtracted, self).__init__(bvalues, thresholds, bounds, initial_guess)

if bounds is not None:
print('warning, bounds from wrapper are not (yet) used in this algorithm')
self.use_bounds = False
self.use_initial_guess = False
# Check the inputs

# Initialize the algorithm
Expand Down Expand Up @@ -83,7 +86,7 @@ def ivim_fit(self, signals, bvalues, **kwargs):
#return f, Dstar, D
results = {}
results["f"] = fit_results.model_params[1]
results["D*"] = fit_results.model_params[2]
results["Dp"] = fit_results.model_params[2]
results["D"] = fit_results.model_params[3]

return results
Loading
Loading