Skip to content

Commit 882df8c

Browse files
authored
Merge pull request #120 from florisvb/improve-fd
made first order actually first order and added fourth order
2 parents 79f3eb9 + df996dc commit 882df8c

File tree

15 files changed

+509
-231
lines changed

15 files changed

+509
-231
lines changed

examples/1_basic_tutorial.ipynb

Lines changed: 142 additions & 48 deletions
Large diffs are not rendered by default.

examples/2a_optimizing_parameters_with_dxdt_known.ipynb

Lines changed: 118 additions & 44 deletions
Large diffs are not rendered by default.

examples/2b_optimizing_parameters_with_dxdt_unknown.ipynb

Lines changed: 117 additions & 43 deletions
Large diffs are not rendered by default.

pynumdiff/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
"""Import useful functions from all modules
22
"""
33
from pynumdiff._version import __version__
4-
from pynumdiff.finite_difference import first_order, second_order
4+
from pynumdiff.finite_difference import first_order, second_order, fourth_order
55
from pynumdiff.smooth_finite_difference import mediandiff, meandiff, gaussiandiff,\
66
friedrichsdiff, butterdiff, splinediff
77
from pynumdiff.total_variation_regularization import iterative_velocity, velocity,\
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
"""This module implements some common finite difference schemes
22
"""
3-
from ._finite_difference import first_order, second_order
3+
from ._finite_difference import first_order, second_order, fourth_order
44

5-
__all__ = ['first_order', 'second_order'] # So these get treated as direct members of the module by sphinx
5+
__all__ = ['first_order', 'second_order', 'fourth_order'] # So these get treated as direct members of the module by sphinx
Lines changed: 75 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -1,81 +1,111 @@
1+
"""This is handy for this module https://web.media.mit.edu/~crtaylor/calculator.html"""
12
import numpy as np
23
from pynumdiff.utils import utility
34
from warnings import warn
45

56

6-
def first_order(x, dt, params=None, options={}, num_iterations=None):
7+
def _finite_difference(x, dt, num_iterations, order):
8+
"""Helper for all finite difference methods, since their iteration structure is all the same.
9+
10+
:param int order: 1, 2, or 4, controls which finite differencing scheme to employ
11+
For other parameters and return values, see public function docstrings
12+
"""
13+
if num_iterations < 1: raise ValueError("num_iterations must be >0")
14+
if order not in [1, 2, 4]: raise ValueError("order must be 1, 2, or 4")
15+
16+
x_hat = x # preserve a reference to x, because if iterating we need it to find the final constant of integration
17+
dxdt_hat = np.zeros(x.shape) # preallocate reusable memory
18+
19+
# For all but the last iteration, do the differentiate->integrate smoothing loop, being careful with endpoints
20+
for i in range(num_iterations-1):
21+
if order == 1:
22+
dxdt_hat[:-1] = np.diff(x_hat)
23+
dxdt_hat[-1] = dxdt_hat[-2] # using stencil -1,0 vs stencil 0,1 you get an expression for the same value
24+
elif order == 2:
25+
dxdt_hat[1:-1] = (x_hat[2:] - x_hat[:-2])/2 # second-order center-difference formula
26+
dxdt_hat[0] = x_hat[1] - x_hat[0]
27+
dxdt_hat[-1] = x_hat[-1] - x_hat[-2] # use first-order endpoint formulas so as not to amplify noise. See #104
28+
elif order == 4:
29+
dxdt_hat[2:-2] = (8*(x_hat[3:-1] - x_hat[1:-3]) - x_hat[4:] + x_hat[:-4])/12 # fourth-order center-difference
30+
dxdt_hat[:2] = np.diff(x_hat[:3])
31+
dxdt_hat[-2:] = np.diff(x_hat[-3:])
32+
33+
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt=1) # estimate new x_hat by integrating derivative
34+
# We can skip dividing by dt here and pass dt=1, because the integration multiplies dt back in.
35+
# No need to find integration constant until the very end, because we just differentiate again.
36+
37+
if order == 1:
38+
dxdt_hat[:-1] = np.diff(x_hat)
39+
dxdt_hat[-1] = dxdt_hat[-2] # using stencil -1,0 vs stencil 0,1 you get an expression for the same value
40+
elif order == 2:
41+
dxdt_hat[1:-1] = x_hat[2:] - x_hat[:-2] # second-order center-difference formula
42+
dxdt_hat[0] = -3 * x_hat[0] + 4 * x_hat[1] - x_hat[2] # second-order endpoint formulas
43+
dxdt_hat[-1] = 3 * x_hat[-1] - 4 * x_hat[-2] + x_hat[-3]
44+
dxdt_hat /= 2
45+
elif order == 4:
46+
dxdt_hat[2:-2] = 8*(x_hat[3:-1] - x_hat[1:-3]) - x_hat[4:] + x_hat[:-4] # fourth-order center-difference
47+
dxdt_hat[0] = -25*x_hat[0] + 48*x_hat[1] - 36*x_hat[2] + 16*x_hat[3] - 3*x_hat[4]
48+
dxdt_hat[1] = -3*x_hat[0] - 10*x_hat[1] + 18*x_hat[2] - 6*x_hat[3] + x_hat[4]
49+
dxdt_hat[-2] = 3*x_hat[-1] + 10*x_hat[-2] - 18*x_hat[-3] + 6*x_hat[-4] - x_hat[-5]
50+
dxdt_hat[-1] = 25*x_hat[-1] - 48*x_hat[-2] + 36*x_hat[-3] - 16*x_hat[-4] + 3*x_hat[-5]
51+
dxdt_hat /= 12
52+
dxdt_hat /= dt # don't forget to scale by dt, can't skip it this time
53+
54+
if num_iterations > 1: # We've lost a constant of integration in the above
55+
x_hat += utility.estimate_integration_constant(x, x_hat) # uses least squares
56+
57+
return x_hat, dxdt_hat
58+
59+
60+
def first_order(x, dt, params=None, options={}, num_iterations=1):
761
"""First-order centered difference method
862
963
:param np.array[float] x: data to differentiate
1064
:param float dt: step size
1165
:param list[float] or float params: (**deprecated**, prefer :code:`num_iterations`)
1266
:param dict options: (**deprecated**, prefer :code:`num_iterations`) a dictionary consisting of {'iterate': (bool)}
13-
:param int num_iterations: If performing iterated FD to smooth the estimates, give the number of iterations.
14-
If ungiven, FD will not be iterated.
67+
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
68+
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
1569
1670
:return: tuple[np.array, np.array] of\n
17-
- **x_hat** -- estimated (smoothed) x
71+
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
1872
- **dxdt_hat** -- estimated derivative of x
1973
"""
74+
warn("`first_order` in past releases was actually calculating a second-order FD. Use `second_order` to achieve " +
75+
"approximately the same behavior. Note that odd-order methods have asymmetrical stencils, which causes " +
76+
"horizontal drift in the answer, especially when iterating.", DeprecationWarning)
2077
if params != None and 'iterate' in options:
2178
warn("`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.", DeprecationWarning)
22-
if isinstance(params, list): params = params[0]
23-
return _iterate_first_order(x, dt, params)
24-
elif num_iterations:
25-
return _iterate_first_order(x, dt, num_iterations)
26-
27-
dxdt_hat = np.diff(x) / dt # Calculate the finite difference
28-
dxdt_hat = np.hstack((dxdt_hat[0], dxdt_hat, dxdt_hat[-1])) # Pad the data
29-
dxdt_hat = np.mean((dxdt_hat[0:-1], dxdt_hat[1:]), axis=0) # Re-finite dxdt_hat using linear interpolation
79+
num_iterations = params[0] if isinstance(params, list) else params
3080

31-
return x, dxdt_hat
81+
return _finite_difference(x, dt, num_iterations, 1)
3282

3383

34-
def second_order(x, dt):
35-
"""Second-order centered difference method
84+
def second_order(x, dt, num_iterations=1):
85+
"""Second-order centered difference method, with special endpoint formulas.
3686
3787
:param np.array[float] x: data to differentiate
3888
:param float dt: step size
89+
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
90+
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
3991
4092
:return: tuple[np.array, np.array] of\n
41-
- **x_hat** -- estimated (smoothed) x
93+
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
4294
- **dxdt_hat** -- estimated derivative of x
4395
"""
44-
dxdt_hat = (x[2:] - x[0:-2]) / (2 * dt)
45-
first_dxdt_hat = (-3 * x[0] + 4 * x[1] - x[2]) / (2 * dt)
46-
last_dxdt_hat = (3 * x[-1] - 4 * x[-2] + x[-3]) / (2 * dt)
47-
dxdt_hat = np.hstack((first_dxdt_hat, dxdt_hat, last_dxdt_hat))
48-
return x, dxdt_hat
96+
return _finite_difference(x, dt, num_iterations, 2)
4997

5098

51-
def _x_hat_using_finite_difference(x, dt):
52-
"""Find a smoothed estimate of the true function by taking FD and then integrating with trapezoids
53-
"""
54-
x_hat, dxdt_hat = first_order(x, dt)
55-
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
56-
x0 = utility.estimate_initial_condition(x, x_hat)
57-
return x_hat + x0
58-
59-
60-
def _iterate_first_order(x, dt, num_iterations):
61-
"""Iterative first order centered finite difference.
99+
def fourth_order(x, dt, num_iterations=1):
100+
"""Fourth-order centered difference method, with special endpoint formulas.
62101
63102
:param np.array[float] x: data to differentiate
64103
:param float dt: step size
65-
:param int num_iterations: number of iterations
104+
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
105+
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
66106
67107
:return: tuple[np.array, np.array] of\n
68-
- **x_hat** -- estimated (smoothed) x
108+
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
69109
- **dxdt_hat** -- estimated derivative of x
70110
"""
71-
w = np.linspace(0, 1, len(x)) # set up weights, [0., ... 1.0]
72-
73-
# forward backward passes
74-
for _ in range(num_iterations):
75-
xf = _x_hat_using_finite_difference(x, dt)
76-
xb = _x_hat_using_finite_difference(x[::-1], dt)
77-
x = xf * w + xb[::-1] * (1 - w)
78-
79-
x_hat, dxdt_hat = first_order(x, dt)
80-
81-
return x_hat, dxdt_hat
111+
return _finite_difference(x, dt, num_iterations, 4)

pynumdiff/linear_model/_linear_model.py

Lines changed: 8 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,9 @@
1414
# Savitzky-Golay filter #
1515
#########################
1616
def savgoldiff(x, dt, params=None, options=None, poly_order=None, window_size=None, smoothing_win=None):
17-
"""Use the Savitzky-Golay to smooth the data and calculate the first derivative. It wses
17+
"""Use the Savitzky-Golay to smooth the data and calculate the first derivative. It uses
1818
scipy.signal.savgol_filter. The Savitzky-Golay is very similar to the sliding polynomial fit,
19-
but slightly noisier, and much faster
19+
but slightly noisier, and much faster.
2020
2121
:param np.array[float] x: data to differentiate
2222
:param float dt: step size
@@ -48,7 +48,7 @@ def savgoldiff(x, dt, params=None, options=None, poly_order=None, window_size=No
4848
dxdt_hat = utility.convolutional_smoother(dxdt_hat, kernel)
4949

5050
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
51-
x0 = utility.estimate_initial_condition(x, x_hat)
51+
x0 = utility.estimate_integration_constant(x, x_hat)
5252
x_hat = x_hat + x0
5353

5454
return x_hat, dxdt_hat
@@ -199,11 +199,8 @@ def _polydiff(x, dt, poly_order, weights=None):
199199
# Linear diff #
200200
###############
201201
def _solve_for_A_and_C_given_X_and_Xdot(X, Xdot, num_integrations, dt, gammaC=1e-1, gammaA=1e-6,
202-
solver=None, A_known=None, epsilon=1e-6, rows_of_interest='all'):
202+
solver=None, A_known=None, epsilon=1e-6):
203203
"""Given state and the derivative, find the system evolution and measurement matrices."""
204-
if rows_of_interest == 'all':
205-
rows_of_interest = np.arange(0, X.shape[0])
206-
207204
# Set up the variables
208205
A = cvxpy.Variable((X.shape[0], X.shape[0]))
209206
C = cvxpy.Variable((X.shape[0], num_integrations))
@@ -219,7 +216,7 @@ def _solve_for_A_and_C_given_X_and_Xdot(X, Xdot, num_integrations, dt, gammaC=1e
219216
Csum = Csum + Cn
220217

221218
# Define the objective function
222-
error = cvxpy.sum_squares(Xdot[rows_of_interest, :] - ( cvxpy.matmul(A, X) + Csum)[rows_of_interest, :])
219+
error = cvxpy.sum_squares(Xdot - ( cvxpy.matmul(A, X) + Csum))
223220
C_regularization = gammaC*cvxpy.sum(cvxpy.abs(C))
224221
A_regularization = gammaA*cvxpy.sum(cvxpy.abs(A))
225222
obj = cvxpy.Minimize(error + C_regularization + A_regularization)
@@ -238,8 +235,7 @@ def _solve_for_A_and_C_given_X_and_Xdot(X, Xdot, num_integrations, dt, gammaC=1e
238235
prob = cvxpy.Problem(obj, constraints)
239236
prob.solve(solver=solver)
240237

241-
A = np.array(A.value)
242-
return A, np.array(C.value)
238+
return np.array(A.value), np.array(C.value)
243239

244240
def lineardiff(x, dt, params=None, options=None, order=None, gamma=None, window_size=None,
245241
step_size=10, kernel='friedrichs', solver=None):
@@ -306,7 +302,7 @@ def _lineardiff(x, dt, order, gamma, solver=None):
306302
dxdt_hat = np.ravel(Xdot_reconstructed[-1, :])
307303

308304
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
309-
x_hat = x_hat + utility.estimate_initial_condition(x+mean, x_hat)
305+
x_hat = x_hat + utility.estimate_integration_constant(x+mean, x_hat)
310306

311307
return x_hat, dxdt_hat
312308

@@ -408,7 +404,7 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
408404

409405
# Integrate to get x_hat
410406
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
411-
x0 = utility.estimate_initial_condition(x[padding:original_L+padding], x_hat)
407+
x0 = utility.estimate_integration_constant(x[padding:original_L+padding], x_hat)
412408
x_hat = x_hat + x0
413409

414410
return x_hat, dxdt_hat

pynumdiff/optimize/_optimize.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
from multiprocessing import Pool
88

99
from ..utils import evaluate
10-
from ..finite_difference import first_order
10+
from ..finite_difference import first_order, second_order, fourth_order
1111
from ..smooth_finite_difference import mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff, splinediff
1212
from ..linear_model import spectraldiff, polydiff, savgoldiff, lineardiff
1313
from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration, jerk_sliding
@@ -80,6 +80,8 @@
8080
{'q': (1e-10, 1e10),
8181
'r': (1e-10, 1e10)})
8282
}
83+
for method in [second_order, fourth_order]:
84+
method_params_and_bounds[method] = method_params_and_bounds[first_order]
8385
for method in [meandiff, gaussiandiff, friedrichsdiff]:
8486
method_params_and_bounds[method] = method_params_and_bounds[mediandiff]
8587
for method in [acceleration, jerk]:
@@ -158,15 +160,15 @@ def optimize(func, x, dt, search_space={}, dxdt_truth=None, tvgamma=1e-2, paddin
158160
singleton_params = {k:v for k,v in params.items() if not isinstance(v, list)}
159161

160162
# The search space is the Cartesian product of all dimensions where multiple options are given
161-
search_space_types = {k:type(v[0]) for k,v in params.items() if isinstance(v, list)} # for converting back and forth from point
163+
search_space_types = {k:type(v[0]) for k,v in params.items() if isinstance(v, list)} # map param name -> type for converting to and from point
162164
if any(v not in [float, int, bool] for v in search_space_types.values()):
163165
raise ValueError("Optimization over categorical strings currently not supported")
164166
# If excluding string type, I can just cast ints and bools to floats, and we're good to go
165167
cartesian_product = product(*[np.array(params[k]).astype(float) for k in search_space_types])
166168

167169
bounds = [bounds[k] if k in bounds else # pass these to minimize(). It should respect them.
168170
(0, 1) if v == bool else
169-
None for k,v in search_space_types.items()]
171+
None for k,v in search_space_types.items()] # None means no bound on a dimension
170172

171173
# wrap the objective and scipy.optimize.minimize because the objective and options are always the same
172174
_obj_fun = partial(_objective_function, func=func, x=x, dt=dt, singleton_params=singleton_params,

pynumdiff/smooth_finite_difference/_smooth_finite_difference.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
from warnings import warn
44

55
# included code
6-
from pynumdiff.finite_difference import first_order as finite_difference
6+
from pynumdiff.finite_difference import second_order as finite_difference
77
from pynumdiff.utils import utility
88

99

pynumdiff/tests/conftest.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ def store_plots(request):
1414
def pytest_sessionfinish(session, exitstatus):
1515
if not hasattr(session.config, 'plots'): return
1616
for method,(fig,axes) in session.config.plots.items():
17-
axes[-1,-1].legend()
17+
fig.legend(*axes[-1, -1].get_legend_handles_labels(), loc='lower left', ncol=2)
1818
fig.suptitle(method.__name__)
1919
fig.tight_layout()
2020
pyplot.show()

0 commit comments

Comments
 (0)