// nyx_space/od/kalman/filtering.rs
/*
    Nyx, blazing fast astrodynamics
    Copyright (C) 2018-onwards Christopher Rabotin <christopher.rabotin@gmail.com>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published
    by the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.
*/
18
19pub use crate::errors::NyxError;
20use crate::linalg::allocator::Allocator;
21use crate::linalg::{DefaultAllocator, DimName, OMatrix, OVector};
22pub use crate::od::estimate::{Estimate, KfEstimate, Residual};
23use crate::od::prelude::KalmanVariant;
24use crate::od::process::ResidRejectCrit;
25pub use crate::od::snc::ProcessNoise;
26use crate::od::{ODDynamicsSnafu, ODError, State};
27pub use crate::time::{Epoch, Unit};
28use log::info;
29use snafu::prelude::*;
30
31use super::KalmanFilter;
32
33impl<T, A> KalmanFilter<T, A>
34where
35    A: DimName,
36    T: State,
37    DefaultAllocator: Allocator<<T as State>::Size>
38        + Allocator<<T as State>::VecLength>
39        + Allocator<A>
40        + Allocator<<T as State>::Size, <T as State>::Size>
41        + Allocator<A, A>
42        + Allocator<<T as State>::Size, A>
43        + Allocator<A, <T as State>::Size>,
44    <DefaultAllocator as Allocator<<T as State>::Size>>::Buffer<f64>: Copy,
45    <DefaultAllocator as Allocator<<T as State>::Size, <T as State>::Size>>::Buffer<f64>: Copy,
46{
    /// Returns a reference to the estimate produced by the most recent time or measurement update.
    pub fn previous_estimate(&self) -> &KfEstimate<T> {
        &self.prev_estimate
    }
51
    /// Overwrites the filter's previous estimate, e.g. to (re-)initialize the filter.
    ///
    /// The estimate is copied out of the provided reference (`KfEstimate` is `Copy`
    /// under this impl's buffer bounds).
    pub fn set_previous_estimate(&mut self, est: &KfEstimate<T>) {
        self.prev_estimate = *est;
    }
55
56    /// Computes a time update/prediction (i.e. advances the filter estimate with the updated STM).
57    ///
58    /// May return a FilterError if the STM was not updated.
59    pub fn time_update(&mut self, nominal_state: T) -> Result<KfEstimate<T>, ODError> {
60        let stm = nominal_state.stm().context(ODDynamicsSnafu)?;
61        let mut covar_bar = stm * self.prev_estimate.covar * stm.transpose();
62
63        // Apply any process noise as in a normal time update, if applicable
64        for (i, snc) in self.process_noise.iter().enumerate().rev() {
65            if let Some(snc_contrib) = snc.propagate::<<T as State>::Size>(
66                nominal_state.orbit(),
67                nominal_state.epoch() - self.prev_estimate.epoch(),
68            )? {
69                if self.prev_used_snc != i {
70                    info!("Switched to {i}-th {snc}");
71                    self.prev_used_snc = i;
72                }
73                // Let's add the process noise
74                covar_bar += snc_contrib;
75                // And break so we don't add any more process noise
76                break;
77            }
78        }
79
80        let state_bar = if matches!(self.variant, KalmanVariant::DeviationTracking) {
81            stm * self.prev_estimate.state_deviation
82        } else {
83            OVector::<f64, <T as State>::Size>::zeros()
84        };
85        let estimate = KfEstimate {
86            nominal_state,
87            state_deviation: state_bar,
88            covar: covar_bar,
89            covar_bar,
90            stm,
91            predicted: true,
92        };
93        self.prev_estimate = estimate;
94        // Update the prev epoch for all SNCs
95        for snc in &mut self.process_noise {
96            snc.prev_epoch = Some(self.prev_estimate.epoch());
97        }
98        Ok(estimate)
99    }
100
101    /// Computes the measurement update with a provided real observation and computed observation.
102    ///
103    /// May return a FilterError if the STM or sensitivity matrices were not updated.
104    pub fn measurement_update<M: DimName>(
105        &mut self,
106        nominal_state: T,
107        real_obs: OVector<f64, M>,
108        computed_obs: OVector<f64, M>,
109        r_k: OMatrix<f64, M, M>,
110        h_tilde: OMatrix<f64, M, <T as State>::Size>,
111        resid_rejection: Option<ResidRejectCrit>,
112    ) -> Result<
113        (
114            KfEstimate<T>,
115            Residual<M>,
116            Option<OMatrix<f64, <T as State>::Size, M>>,
117        ),
118        ODError,
119    >
120    where
121        DefaultAllocator: Allocator<M>
122            + Allocator<M, M>
123            + Allocator<M, <T as State>::Size>
124            + Allocator<<T as State>::Size, M>
125            + Allocator<nalgebra::Const<1>, M>,
126    {
127        let epoch = nominal_state.epoch();
128
129        // Grab the state transition matrix.
130        let stm = nominal_state.stm().context(ODDynamicsSnafu)?;
131
132        // Propagate the covariance.
133        let mut covar_bar = stm * self.prev_estimate.covar * stm.transpose();
134
135        // Apply any process noise as in a normal time update, if applicable
136        for (i, snc) in self.process_noise.iter().enumerate().rev() {
137            if let Some(snc_contrib) = snc.propagate::<<T as State>::Size>(
138                nominal_state.orbit(),
139                nominal_state.epoch() - self.prev_estimate.epoch(),
140            )? {
141                if self.prev_used_snc != i {
142                    info!("Switched to {i}-th {snc}");
143                    self.prev_used_snc = i;
144                }
145                // Let's add the process noise
146                covar_bar += snc_contrib;
147                // And break so we don't add any more process noise
148                break;
149            }
150        }
151
152        // Project the propagated covariance into the measurement space.
153        let p_ht = covar_bar * h_tilde.transpose();
154        let h_p_ht = &h_tilde * &p_ht;
155
156        // Compute the innovation matrix (S_k).
157        let s_k = &h_p_ht + &r_k;
158
159        // Compute observation deviation/error (usually marked as y_i)
160        let prefit = real_obs.clone() - computed_obs.clone();
161
162        // Compute the prefit ratio for the automatic rejection.
163        // The measurement covariance is the square of the measurement itself.
164        // So we compute its Cholesky decomposition to return to the non squared values.
165        let s_k_chol = match s_k.clone().cholesky() {
166            Some(r_k_clone) => r_k_clone,
167            None => {
168                // In very rare case, when there isn't enough noise in the measurements,
169                // the inverting of S_k fails. If so, we revert back to the nominal Kalman derivation.
170                r_k.clone().cholesky().ok_or(ODError::SingularNoiseRk)?
171            }
172        };
173
174        // Get the L factor from the Cholesky decomposition
175        let l_matrix = s_k_chol.l();
176
177        // Solve L * v = prefit for the whitened residual vector v. This is an O(n^2) triangular solve, faster than a full Cholesky solve.
178        let whitened_resid = l_matrix.solve_lower_triangular(&prefit).unwrap();
179
180        // Compute the RMS ratio using the norm of the whitened vector This is the true Mahalanobis-based N-sigma ratio.
181        let ratio = (whitened_resid.norm_squared() / (M::DIM as f64)).sqrt();
182
183        // Compute the physical 1-sigma envelop. Using the diagonal of S_k (not L) is correct for physical innovation plots.
184        let innovation_trend = s_k.diagonal().map(|x| x.sqrt());
185
186        if let Some(resid_reject) = resid_rejection {
187            if ratio > resid_reject.num_sigmas {
188                // Reject this whole measurement and perform only a time update
189                let pred_est = self.time_update(nominal_state)?;
190                let resid = Residual::rejected(
191                    epoch,
192                    prefit,
193                    ratio,
194                    innovation_trend,
195                    real_obs,
196                    computed_obs,
197                );
198
199                return Ok((pred_est, resid, None));
200            }
201        }
202
203        // Instead of inverting the innovation matrix S_k, we will use the (super short) arXiv paper 1111.4144
204        // which shows how to use the Cholesky decomposition to invert a matrix, core tenets repeated here for my reference.
205        // \forall A \ in \mathbb{R}^{n\times n}, X=A^{-1} <=> A*X=I
206        // Cholesky: A = L*L^T
207        // Therefore, L*L^T*X = I
208        // 1. Solve L * Y = I  => Y = L^{-1} (via forward sub)
209        // 2. Solve L^T * X = Y => X = (L^T)^{-1} * L^{-1} = A^{-1}
210        //
211        // _However_, we can be more clever still!
212        // Instead of explicitly inverting the innovation matrix S_k, we solve the linear system
213        // S_k * K^T = H * P using Cholesky decomposition.
214        // This avoids the numerical instability of computing S_k^-1 directly.
215        // Math context:
216        // We want to solve A * X = B for X.
217        // 1. Decompose A into L * L^T (Cholesky).
218        // 2. Solve L * Y = B for Y (Forward substitution).
219        // 3. Solve L^T * X = Y for X (Backward substitution).
220
221        // Prepare the RHS of the linear system: (P * H^T)^T = H * P
222        // We want to solve: S_k * K^T = H * P
223        // So K = (S_k \ (H * P))^T
224        let rhs = p_ht.transpose();
225
226        // Solve for Gain using Cholesky
227        // We try standard Cholesky first.
228        let gain = match s_k.clone().cholesky() {
229            Some(chol) => {
230                // SOLVE, don't invert.
231                // chol.solve(B) computes S_k^{-1} * B more stably than inv(S_k) * B
232                let k_t = chol.solve(&rhs);
233                k_t.transpose()
234            }
235            None => {
236                // If this fails, revert the LU decomposition of nalgebra
237                // Invert the innovation covariance.
238                match s_k.try_inverse() {
239                    Some(s_k_inv) => covar_bar * &h_tilde.transpose() * &s_k_inv,
240                    None => {
241                        eprintln!(
242                            "SINGULAR GAIN\nr = {r_k}\nh = {h_tilde:.3e}\ncovar = {covar_bar:.3e}"
243                        );
244                        return Err(ODError::SingularKalmanGain);
245                    }
246                }
247            }
248        };
249
250        // Compute the state estimate, depends on the variant.
251        let (state_hat, res) = match self.variant {
252            KalmanVariant::ReferenceUpdate => {
253                // In EKF, the state hat is actually the state deviation. We trust the gain to be correct,
254                // so we just apply it directly to the prefit residual.
255                let state_hat = &gain * &prefit;
256                let postfit = &prefit - (&h_tilde * state_hat);
257                let resid = Residual::accepted(
258                    epoch,
259                    prefit,
260                    postfit,
261                    ratio,
262                    innovation_trend,
263                    real_obs,
264                    computed_obs,
265                );
266                (state_hat, resid)
267            }
268            KalmanVariant::DeviationTracking => {
269                // Time update
270                let state_bar = stm * self.prev_estimate.state_deviation;
271                let postfit = &prefit - (&h_tilde * state_bar);
272                (
273                    state_bar + &gain * &postfit,
274                    Residual::accepted(
275                        epoch,
276                        prefit,
277                        postfit,
278                        ratio,
279                        innovation_trend,
280                        real_obs,
281                        computed_obs,
282                    ),
283                )
284            }
285        };
286
287        // Compute covariance (Joseph update)
288        let first_term =
289            OMatrix::<f64, <T as State>::Size, <T as State>::Size>::identity() - &gain * &h_tilde;
290        let covar =
291            first_term * covar_bar * first_term.transpose() + &gain * &r_k * &gain.transpose();
292
293        // Force symmetry on the covariance
294        let covar = 0.5 * (covar + covar.transpose());
295
296        // And wrap up
297        let estimate = KfEstimate {
298            nominal_state,
299            state_deviation: state_hat,
300            covar,
301            covar_bar,
302            stm,
303            predicted: false,
304        };
305
306        self.prev_estimate = estimate;
307        // Update the prev epoch for all SNCs
308        for snc in &mut self.process_noise {
309            snc.prev_epoch = Some(self.prev_estimate.epoch());
310        }
311
312        Ok((estimate, res, Some(gain)))
313    }
314
    /// Returns true if this filter variant replaces the reference (nominal) state with the
    /// updated estimate after a measurement update, i.e. extended Kalman filter behavior.
    pub fn replace_state(&self) -> bool {
        matches!(self.variant, KalmanVariant::ReferenceUpdate)
    }
318
319    /// Overwrites all of the process noises to the one provided
320    pub fn set_process_noise(&mut self, snc: ProcessNoise<A>) {
321        self.process_noise = vec![snc];
322    }
323}