author | Aaron LI <aly@aaronly.me> | 2019-02-22 23:40:26 +0800 |
---|---|---|
committer | Aaron LI <aly@aaronly.me> | 2019-02-22 23:40:26 +0800 |
commit | 40fa40f841f069c2d74cf0da3831dbea7ad8af1f (patch) | |
tree | 68c76e1a47f1df6b17d0f59961beae053e25e3d3 /fg21sim/utils | |
parent | 8ff2d1feb15731a53d2f3ea0d7b4638962ad2771 (diff) | |
download | fg21sim-40fa40f841f069c2d74cf0da3831dbea7ad8af1f.tar.bz2 |
utils/analyze: Improve logfit() to output uncertainties
Use ``scipy.optimize.curve_fit()`` to perform the underlying fitting, which
also provides the uncertainties of the fitted parameters.
Rename logfit() to loglinfit().
Diffstat (limited to 'fg21sim/utils')
-rw-r--r-- | fg21sim/utils/analyze.py | 33 |
1 file changed, 25 insertions, 8 deletions
diff --git a/fg21sim/utils/analyze.py b/fg21sim/utils/analyze.py
index 6c7d89a..65dfa47 100644
--- a/fg21sim/utils/analyze.py
+++ b/fg21sim/utils/analyze.py
@@ -8,6 +8,7 @@ Utilities to help analyze the simulation results.
 import logging

 import numpy as np
+from scipy import optimize

 logger = logging.getLogger(__name__)

@@ -87,26 +88,42 @@ def countdist_integrated(x, nbin, log=True, xmin=None, xmax=None):
     return counts, bins, binedges


-def logfit(x, y):
+def loglinfit(x, y, **kwargs):
     """
-    Fit the data points with: y = a * x^b
+    Fit the data points with a log-linear model: y = a * x^b

     Parameters
     ----------
     x, y : list[float]
         The data points.
+    kwargs : dict
+        Extra parameters passed to ``scipy.optimize.least_squares()``.

     Returns
     -------
     coef : (a, b)
         The fitted coefficients.
-    fp : function
+    err : (a_err, b_err)
+        The uncertainties of the coefficients.
+    fun : function
         The function with fitted coefficients to calculate the fitted
-        values: fp(x).
+        values: fun(x).
     """
+    def _f_poly1(x, a, b):
+        return a + b * x
+
     logx = np.log(x)
     logy = np.log(y)
-    fit = np.polyfit(logx, logy, deg=1)
-    coef = (np.exp(fit[1]), fit[0])
-    fp = lambda x: np.exp(np.polyval(fit, np.log(x)))
-    return coef, fp
+    f_scale = np.mean(logy)
+    args = {
+        "method": "trf",
+        "loss": "soft_l1",
+        "f_scale": np.mean(logy),
+    }
+    args.update(kwargs)
+    p, pcov = optimize.curve_fit(_f_poly1, logx, logy, p0=(1, 1), **args)
+    coef = (np.exp(p[0]), p[1])
+    perr = np.sqrt(np.diag(pcov))
+    err = (np.exp(perr[0]), perr[1])
+    fun = lambda x: np.exp(_f_poly1(np.log(x), *p))
+    return coef, err, fun
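For reference, a minimal usage sketch of the renamed ``loglinfit()``; the
import path follows the file touched above, and the power-law data are
made up purely for illustration::

    import numpy as np
    from fg21sim.utils.analyze import loglinfit

    # Synthetic power-law data: y = 2.5 * x^1.8 (illustrative values only)
    x = np.linspace(1.0, 100.0, 50)
    y = 2.5 * x**1.8

    # coef holds the fitted (amplitude, index); err holds their
    # uncertainties derived from the fit's covariance matrix;
    # fun(x) evaluates the fitted model at new points.
    coef, err, fun = loglinfit(x, y)
    a, b = coef
    a_err, b_err = err
    yfit = fun(x)

The ``soft_l1`` robust loss used by default makes the fit less sensitive
to outlying points than the previous ``np.polyfit()``-based approach.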