@EthanJamesLew @KochdumperNiklas I get this error when running the f1tenth benchmark with polynomial observables.
Error trace:
F1tenthCar --------------------------------------------------------------
ValueError Traceback (most recent call last)
/var/folders/jc/77d0pws90139vg5cc_fg8s_w5hwxm2/T/ipykernel_40751/2272883485.py in <module>
173
174 # compute error
--> 175 perc_error, mse = compute_error(model, test_data)
176
177 # store and print results
/var/folders/jc/77d0pws90139vg5cc_fg8s_w5hwxm2/T/ipykernel_40751/2272883485.py in compute_error(model, test_data)
101 teval = np.linspace(start_time, end_time, len(t.times))
102
--> 103 trajectory = model.solve_ivp(
104 initial_state=iv,
105 tspan=(start_time, end_time),
/usr/local/anaconda3/lib/python3.8/site-packages/autokoopman/core/system.py in solve_ivp(self, initial_state, tspan, teval, inputs, sampling_period)
262 diff[diff < 0.0] = float("inf")
263 tidx = diff.argmin()
--> 264 states[idx + 1] = self.step(
265 float(time), states[idx], np.atleast_1d(inputs[tidx])
266 ).flatten()
/usr/local/anaconda3/lib/python3.8/site-packages/autokoopman/core/system.py in step(self, time, state, sinput)
342 self, time: float, state: np.ndarray, sinput: Optional[np.ndarray]
343 ) -> np.ndarray:
--> 344 return self._step_func(time, state, sinput)
345
346 @property
/usr/local/anaconda3/lib/python3.8/site-packages/autokoopman/estimator/koopman.py in step_func(t, x, i)
76
77 def step_func(t, x, i):
---> 78 obs = (self.obs(x).flatten())[np.newaxis, :]
79 if self._has_input:
80 return np.real(
/usr/local/anaconda3/lib/python3.8/site-packages/autokoopman/core/observables.py in __call__(self, X)
30
31 def __call__(self, X: np.ndarray) -> np.ndarray:
---> 32 return self.obs_fcn(X)
33
34 def obs_grad(self, X: np.ndarray) -> np.ndarray:
/usr/local/anaconda3/lib/python3.8/site-packages/autokoopman/core/observables.py in obs_fcn(self, X)
163
164 def obs_fcn(self, X: np.ndarray) -> np.ndarray:
--> 165 return self.poly.transform(np.atleast_2d(X))
166
167
/usr/local/anaconda3/lib/python3.8/site-packages/sklearn/preprocessing/_polynomial.py in transform(self, X)
378 check_is_fitted(self)
379
--> 380 X = self._validate_data(
381 X, order="F", dtype=FLOAT_DTYPES, reset=False, accept_sparse=("csr", "csc")
382 )
/usr/local/anaconda3/lib/python3.8/site-packages/sklearn/base.py in _validate_data(self, X, y, reset, validate_separately, **check_params)
575 raise ValueError("Validation should be done on X, y or both.")
576 elif not no_val_X and no_val_y:
--> 577 X = check_array(X, input_name="X", **check_params)
578 out = X
579 elif no_val_X and not no_val_y:
/usr/local/anaconda3/lib/python3.8/site-packages/sklearn/utils/validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, estimator, input_name)
897
898 if force_all_finite:
--> 899 _assert_all_finite(
900 array,
901 input_name=input_name,
/usr/local/anaconda3/lib/python3.8/site-packages/sklearn/utils/validation.py in _assert_all_finite(X, allow_nan, msg_dtype, estimator_name, input_name)
144 "#estimators-that-handle-nan-values"
145 )
--> 146 raise ValueError(msg_err)
147
148 # for object dtype data, we only check for NaNs (GH-13254)
ValueError: Input X contains NaN.
PolynomialFeatures does not accept missing values encoded as NaN natively. For supervised learning, you might want to consider sklearn.ensemble.HistGradientBoostingClassifier and Regressor which accept missing values encoded as NaNs natively. Alternatively, it is possible to preprocess the data, for instance by using an imputer transformer in a pipeline or drop samples with missing values. See https://scikit-learn.org/stable/modules/impute.html You can find a list of all estimators that handle NaN values at the following page: https://scikit-learn.org/stable/modules/impute.html#estimators-that-handle-nan-values