|
| 1 | +"""Finite-difference coefficients used in this module can be derived with this handy calculator: https://web.media.mit.edu/~crtaylor/calculator.html"""
1 | 2 | import numpy as np |
2 | 3 | from pynumdiff.utils import utility |
3 | 4 | from warnings import warn |
4 | 5 |
|
5 | 6 |
|
6 | | -def first_order(x, dt, params=None, options={}, num_iterations=None): |
| 7 | +def _finite_difference(x, dt, num_iterations, order): |
| 8 | + """Helper for all finite difference methods, since their iteration structure is all the same. |
| 9 | + |
| 10 | + :param int order: 1, 2, or 4, controls which finite differencing scheme to employ |
| 11 | + For other parameters and return values, see public function docstrings |
| 12 | + """ |
| 13 | + if num_iterations < 1: raise ValueError("num_iterations must be >0") |
| 14 | + if order not in [1, 2, 4]: raise ValueError("order must be 1, 2, or 4") |
| 15 | + |
| 16 | + x_hat = x # preserve a reference to x, because if iterating we need it to find the final constant of integration |
| 17 | + dxdt_hat = np.zeros(x.shape) # preallocate reusable memory |
| 18 | + |
| 19 | +    # For all but the last iteration, do the differentiate->integrate smoothing loop, being careful with endpoints
| 20 | + for i in range(num_iterations-1): |
| 21 | + if order == 1: |
| 22 | + dxdt_hat[:-1] = np.diff(x_hat) |
| 23 | +                dxdt_hat[-1] = dxdt_hat[-2] # the backward stencil {-1,0} at the last point gives the same formula as the forward stencil {0,1} at the second-to-last
| 24 | + elif order == 2: |
| 25 | + dxdt_hat[1:-1] = (x_hat[2:] - x_hat[:-2])/2 # second-order center-difference formula |
| 26 | + dxdt_hat[0] = x_hat[1] - x_hat[0] |
| 27 | + dxdt_hat[-1] = x_hat[-1] - x_hat[-2] # use first-order endpoint formulas so as not to amplify noise. See #104 |
| 28 | + elif order == 4: |
| 29 | + dxdt_hat[2:-2] = (8*(x_hat[3:-1] - x_hat[1:-3]) - x_hat[4:] + x_hat[:-4])/12 # fourth-order center-difference |
| 30 | + dxdt_hat[:2] = np.diff(x_hat[:3]) |
| 31 | + dxdt_hat[-2:] = np.diff(x_hat[-3:]) |
| 32 | + |
| 33 | + x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt=1) # estimate new x_hat by integrating derivative |
| 34 | + # We can skip dividing by dt here and pass dt=1, because the integration multiplies dt back in. |
| 35 | + # No need to find integration constant until the very end, because we just differentiate again. |
| 36 | + |
| 37 | + if order == 1: |
| 38 | + dxdt_hat[:-1] = np.diff(x_hat) |
| 39 | + dxdt_hat[-1] = dxdt_hat[-2] # using stencil -1,0 vs stencil 0,1 you get an expression for the same value |
| 40 | + elif order == 2: |
| 41 | + dxdt_hat[1:-1] = x_hat[2:] - x_hat[:-2] # second-order center-difference formula |
| 42 | + dxdt_hat[0] = -3 * x_hat[0] + 4 * x_hat[1] - x_hat[2] # second-order endpoint formulas |
| 43 | + dxdt_hat[-1] = 3 * x_hat[-1] - 4 * x_hat[-2] + x_hat[-3] |
| 44 | + dxdt_hat /= 2 |
| 45 | + elif order == 4: |
| 46 | + dxdt_hat[2:-2] = 8*(x_hat[3:-1] - x_hat[1:-3]) - x_hat[4:] + x_hat[:-4] # fourth-order center-difference |
| 47 | + dxdt_hat[0] = -25*x_hat[0] + 48*x_hat[1] - 36*x_hat[2] + 16*x_hat[3] - 3*x_hat[4] |
| 48 | + dxdt_hat[1] = -3*x_hat[0] - 10*x_hat[1] + 18*x_hat[2] - 6*x_hat[3] + x_hat[4] |
| 49 | + dxdt_hat[-2] = 3*x_hat[-1] + 10*x_hat[-2] - 18*x_hat[-3] + 6*x_hat[-4] - x_hat[-5] |
| 50 | + dxdt_hat[-1] = 25*x_hat[-1] - 48*x_hat[-2] + 36*x_hat[-3] - 16*x_hat[-4] + 3*x_hat[-5] |
| 51 | + dxdt_hat /= 12 |
| 52 | + dxdt_hat /= dt # don't forget to scale by dt, can't skip it this time |
| 53 | + |
| 54 | + if num_iterations > 1: # We've lost a constant of integration in the above |
| 55 | + x_hat += utility.estimate_integration_constant(x, x_hat) # uses least squares |
| 56 | + |
| 57 | + return x_hat, dxdt_hat |
| 58 | + |
| 59 | + |
| 60 | +def first_order(x, dt, params=None, options={}, num_iterations=1): |
7 | 61 | """First-order centered difference method |
8 | 62 |
|
9 | 63 | :param np.array[float] x: data to differentiate |
10 | 64 | :param float dt: step size |
11 | 65 | :param list[float] or float params: (**deprecated**, prefer :code:`num_iterations`) |
12 | 66 | :param dict options: (**deprecated**, prefer :code:`num_iterations`) a dictionary consisting of {'iterate': (bool)} |
13 | | - :param int num_iterations: If performing iterated FD to smooth the estimates, give the number of iterations. |
14 | | - If ungiven, FD will not be iterated. |
| 67 | + :param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal |
| 68 | + rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times |
15 | 69 |
|
16 | 70 | :return: tuple[np.array, np.array] of\n |
17 | | - - **x_hat** -- estimated (smoothed) x |
| 71 | + - **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat |
18 | 72 | - **dxdt_hat** -- estimated derivative of x |
19 | 73 | """ |
| 74 | + warn("`first_order` in past releases was actually calculating a second-order FD. Use `second_order` to achieve " + |
| 75 | + "approximately the same behavior. Note that odd-order methods have asymmetrical stencils, which causes " + |
| 76 | + "horizontal drift in the answer, especially when iterating.", DeprecationWarning) |
20 | 77 | if params != None and 'iterate' in options: |
21 | 78 | warn("`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.", DeprecationWarning) |
22 | | - if isinstance(params, list): params = params[0] |
23 | | - return _iterate_first_order(x, dt, params) |
24 | | - elif num_iterations: |
25 | | - return _iterate_first_order(x, dt, num_iterations) |
26 | | - |
27 | | - dxdt_hat = np.diff(x) / dt # Calculate the finite difference |
28 | | - dxdt_hat = np.hstack((dxdt_hat[0], dxdt_hat, dxdt_hat[-1])) # Pad the data |
29 | | - dxdt_hat = np.mean((dxdt_hat[0:-1], dxdt_hat[1:]), axis=0) # Re-finite dxdt_hat using linear interpolation |
| 79 | + num_iterations = params[0] if isinstance(params, list) else params |
30 | 80 |
|
31 | | - return x, dxdt_hat |
| 81 | + return _finite_difference(x, dt, num_iterations, 1) |
32 | 82 |
|
33 | 83 |
|
34 | | -def second_order(x, dt): |
35 | | - """Second-order centered difference method |
| 84 | +def second_order(x, dt, num_iterations=1): |
| 85 | + """Second-order centered difference method, with special endpoint formulas. |
36 | 86 |
|
37 | 87 | :param np.array[float] x: data to differentiate |
38 | 88 | :param float dt: step size |
| 89 | + :param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal |
| 90 | + rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times |
39 | 91 |
|
40 | 92 | :return: tuple[np.array, np.array] of\n |
41 | | - - **x_hat** -- estimated (smoothed) x |
| 93 | + - **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat |
42 | 94 | - **dxdt_hat** -- estimated derivative of x |
43 | 95 | """ |
44 | | - dxdt_hat = (x[2:] - x[0:-2]) / (2 * dt) |
45 | | - first_dxdt_hat = (-3 * x[0] + 4 * x[1] - x[2]) / (2 * dt) |
46 | | - last_dxdt_hat = (3 * x[-1] - 4 * x[-2] + x[-3]) / (2 * dt) |
47 | | - dxdt_hat = np.hstack((first_dxdt_hat, dxdt_hat, last_dxdt_hat)) |
48 | | - return x, dxdt_hat |
| 96 | + return _finite_difference(x, dt, num_iterations, 2) |
49 | 97 |
|
50 | 98 |
|
51 | | -def _x_hat_using_finite_difference(x, dt): |
52 | | - """Find a smoothed estimate of the true function by taking FD and then integrating with trapezoids |
53 | | - """ |
54 | | - x_hat, dxdt_hat = first_order(x, dt) |
55 | | - x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt) |
56 | | - x0 = utility.estimate_initial_condition(x, x_hat) |
57 | | - return x_hat + x0 |
58 | | - |
59 | | - |
60 | | -def _iterate_first_order(x, dt, num_iterations): |
61 | | - """Iterative first order centered finite difference. |
| 99 | +def fourth_order(x, dt, num_iterations=1): |
| 100 | + """Fourth-order centered difference method, with special endpoint formulas. |
62 | 101 |
|
63 | 102 | :param np.array[float] x: data to differentiate |
64 | 103 | :param float dt: step size |
65 | | - :param int num_iterations: number of iterations |
| 104 | + :param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal |
| 105 | + rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times |
66 | 106 |
|
67 | 107 | :return: tuple[np.array, np.array] of\n |
68 | | - - **x_hat** -- estimated (smoothed) x |
| 108 | + - **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat |
69 | 109 | - **dxdt_hat** -- estimated derivative of x |
70 | 110 | """ |
71 | | - w = np.linspace(0, 1, len(x)) # set up weights, [0., ... 1.0] |
72 | | - |
73 | | - # forward backward passes |
74 | | - for _ in range(num_iterations): |
75 | | - xf = _x_hat_using_finite_difference(x, dt) |
76 | | - xb = _x_hat_using_finite_difference(x[::-1], dt) |
77 | | - x = xf * w + xb[::-1] * (1 - w) |
78 | | - |
79 | | - x_hat, dxdt_hat = first_order(x, dt) |
80 | | - |
81 | | - return x_hat, dxdt_hat |
| 111 | + return _finite_difference(x, dt, num_iterations, 4) |
0 commit comments