This notebook compares NEES (Normalized Estimation Error Squared) for the Gal3 IMU EKF when the measurement update does and does not perform the reset (covariance transport) step.
We keep the setup simple and focus on the impact of the performReset flag in updateWithVector.
# Install GTSAM from pip if running in Google Colab
try:
import google.colab # type: ignore
%pip install --quiet gtsam-develop
except Exception:
pass # Not in Colab
import numpy as np
import plotly.graph_objects as go
import gtsam
from gtsam import Gal3
from gtsam import ConstantTwistScenario, ScenarioRunner

NEES: theoretical introduction
The Normalized Estimation Error Squared (NEES) is

$$\mathrm{NEES} = \mathbf{e}^\top P^{-1} \mathbf{e},$$

where $\mathbf{e}$ is the estimation error in the current tangent space and $P$ is the filter covariance. For a consistent filter with correct noise modeling, the expected NEES equals the state dimension (here 10). In practice, we estimate this expectation by Monte Carlo averaging over multiple simulated runs.
def nees(error, covariance):
    """Compute the Normalized Estimation Error Squared (NEES).

    Args:
        error: 1-D estimation error vector e, expressed in the current
            tangent space of the state estimate.
        covariance: Positive-definite filter covariance matrix P.

    Returns:
        float: e^T P^{-1} e. Uses a linear solve instead of an explicit
        matrix inverse for numerical stability.
    """
    return float(error.T @ np.linalg.solve(covariance, error))


# Scenario: steady yaw rate with constant body-frame velocity
# --- Ground-truth motion: steady yaw with constant world-frame speed ---
radius = 30.0
angular_velocity = np.pi  # rad/sec
w_b = np.array([0, 0, angular_velocity])  # body yaw rate
v_n = np.array([radius * angular_velocity, 0, 0])  # world-frame velocity
scenario = ConstantTwistScenario(w_b, v_n)

# --- Simulation parameters ---
dt = 1.0 / 180.0  # 1 degree per step
T = 6.0           # total duration (s)
N = int(T / dt)

# --- IMU params (NED) ---
params = gtsam.PreintegrationParams.MakeSharedD(9.81)
params.setAccelerometerCovariance(np.diag([2e-1] * 3))
params.setIntegrationCovariance(np.diag([2e-1] * 3))
params.setGyroscopeCovariance(np.diag([2e-2] * 3))

# Runner samples noisy IMU readings from the scenario; zero bias here.
runner = ScenarioRunner(
    scenario,
    params,
    dt,
    gtsam.imuBias.ConstantBias(np.zeros(3), np.zeros(3)),
)
Monte Carlo analysis of running the EKF
def run_one_filter(times, params, X0, P0, R, measurements, true_states, perform_reset):
    """Run one EKF over a pre-generated measurement sequence; return NEES values.

    Args:
        times: Monotone array of sample timestamps (uniform spacing).
        params: gtsam.PreintegrationParams for the IMU.
        X0: Initial (perturbed) Gal3 state estimate.
        P0: Initial state covariance.
        R: Position measurement noise covariance (3x3).
        measurements: Per-step dicts with "imu" (omega, acc), "has_meas",
            "H" and "z" entries, as produced by run_one_trial.
        true_states: Ground-truth Gal3 states, one per timestamp.
        perform_reset: Whether updateWithVector performs the reset
            (covariance transport) step under study.

    Returns:
        np.ndarray of NEES values, one per timestamp.
    """
    nees_vals = np.zeros(len(times))
    # Derive the IMU step from the time grid instead of relying on the
    # module-level global `dt` (fix: the original closed over the global;
    # the value is identical for a uniform grid).
    step = times[1] - times[0] if len(times) > 1 else 0.0
    ekf = gtsam.Gal3ImuEKF(X0, P0, params)
    for k, t in enumerate(times):
        if k > 0:
            omega_meas, acc_meas = measurements[k]["imu"]
            ekf.predict(omega_meas, acc_meas, step)
        if measurements[k]["has_meas"]:
            predicted_position = ekf.state().position()
            H = measurements[k]["H"]
            z = measurements[k]["z"]
            ekf.updateWithVector(predicted_position, H, z, R, perform_reset)
        X_true = true_states[k]
        # Error expressed in the tangent space at the current estimate.
        err = ekf.state().localCoordinates(X_true)
        nees_vals[k] = nees(np.asarray(err).reshape(-1), ekf.covariance())
    return nees_vals
def run_one_trial(
    times, scenario, runner, params, P0, init_sampler, pos_sampler, pos_std, M
):
    """Simulate one Monte Carlo trial and evaluate both EKF variants on it.

    Generates ground-truth Gal3 states, noisy IMU readings, and (every M-th
    step) a noisy position measurement, then runs the EKF twice on the exact
    same data — once with the reset (covariance transport) enabled and once
    without — so the two NEES traces differ only in that flag.

    Args:
        times: Monotone array of sample timestamps (uniform spacing).
        scenario: Scenario providing ground-truth states via gal3(t).
        runner: ScenarioRunner used to sample noisy IMU measurements.
        params: gtsam.PreintegrationParams shared by both filter runs.
        P0: Initial state covariance.
        init_sampler: Sampler for the initial-state perturbation.
        pos_sampler: Sampler for position measurement noise.
        pos_std: Position measurement noise std (m); defines R.
        M: Measurement cadence — a position fix every M-th step.

    Returns:
        (nees_reset, nees_no_reset): NEES arrays for the two filter runs.
    """
    measurements = []
    true_states = []
    R = np.eye(3) * (pos_std**2)
    # IMU step derived from the time grid rather than the module-level
    # global `dt` (fix: the original closed over the global).
    step = times[1] - times[0] if len(times) > 1 else 0.0
    X0 = None  # set at k == 0; guards against an empty `times`
    for k, t in enumerate(times):
        X_true = scenario.gal3(t)
        true_states.append(X_true)
        if k == 0:
            # Perturb the true initial state to get the filter's prior mean.
            xi0 = np.asarray(init_sampler.sample()).reshape(-1)
            X0 = X_true.retract(xi0)
        if k > 0:
            # Noisy IMU sample for the interval ending at t.
            omega_meas = runner.measuredAngularVelocity(t - step)
            acc_meas = runner.measuredSpecificForce(t - step)
        else:
            omega_meas = np.zeros(3)
            acc_meas = np.zeros(3)
        has_meas = k % M == 0
        if has_meas:
            z = X_true.position() + np.asarray(pos_sampler.sample()).reshape(-1)
            # Measurement Jacobian in the 10-D tangent space; the position
            # block (columns 6:9) is mapped through the true attitude.
            H = np.zeros((3, 10))
            H[:, 6:9] = X_true.attitude().matrix()
        else:
            z = None
            H = None
        measurements.append(
            {"imu": (omega_meas, acc_meas), "has_meas": has_meas, "z": z, "H": H}
        )
    nees_reset = run_one_filter(
        times, params, X0, P0, R, measurements, true_states, True
    )
    nees_no_reset = run_one_filter(
        times, params, X0, P0, R, measurements, true_states, False
    )
    return nees_reset, nees_no_reset


# Initialize EKFs
# Initial sigmas for the 10-D Gal3 tangent state; presumably ordered
# [rot(3), vel(3), pos(3), time(1)] — the position block matches H's
# columns 6:9 above. TODO(review): confirm the ordering against Gal3 docs.
init_sigmas = np.array([0.1, 0.1, 0.1, 0.4, 0.4, 0.4, 2.0, 2.0, 2.0, 0.1])
P0 = np.diag(init_sigmas ** 2)
# Time variance 2.5e-1 = 0.5^2. NOTE(review): the original comment said
# (0.05^2), which would be 2.5e-3 — confirm which value was intended.
P0[9, 9] = 2.5e-1

# Noise samplers (fixed seeds for reproducibility across runs).
pos_std = 20.0  # m measurement noise (more conservative)
pos_sampler = gtsam.Sampler(np.array([pos_std] * 3), 25)
init_sampler = gtsam.Sampler(init_sigmas, 23)

times = np.linspace(0.0, T, N + 1)
num_mc = 100
nees_reset_mc = np.zeros((num_mc, len(times)))
nees_no_reset_mc = np.zeros((num_mc, len(times)))
M = 50  # measurement cadence

for mc in range(num_mc):
    nees_reset_mc[mc], nees_no_reset_mc[mc] = run_one_trial(
        times, scenario, runner, params, P0, init_sampler, pos_sampler, pos_std, M
    )

# Monte Carlo averages across all trials, per timestamp.
nees_reset = nees_reset_mc.mean(axis=0)
nees_no_reset = nees_no_reset_mc.mean(axis=0)
# Plot the Monte Carlo-averaged NEES of both filter variants against the
# theoretical expectation (the state dimension).
dim = 10  # Gal3 tangent dimension
expected_line = [dim] * len(times)

fig = go.Figure()
fig.add_scatter(x=times, y=nees_reset, name='With reset', line=dict(color="#0fae66"))
fig.add_scatter(x=times, y=nees_no_reset, name='Without reset', line=dict(color='#d62728'))
fig.add_scatter(x=times, y=expected_line, name='Expected NEES (dim)',
                line=dict(color='black', dash='dash'))
fig.update_layout(title='NEES comparison: reset vs. no reset',
                  xaxis_title='Time (s)', yaxis_title='NEES')
fig.show()
Interpreting the results
These curves are Monte Carlo averages, so they are much closer to the expected behavior than a single run. The dashed line at 10 is the theoretical mean NEES for a 10‑D state.
In this plot, both filters remain above 10, meaning they are still somewhat over‑confident. The no‑reset curve is consistently higher, showing worse inconsistency when covariance is not transported after retraction.
Takeaway. The reset-enabled filter keeps the covariance aligned with the current tangent space, leading to a NEES trace that is better behaved and closer to the expected value. Without reset, the covariance is not transported after retraction, so NEES can become inconsistent over time.