author = {Matthieu Geist and Olivier Pietquin and Gabriel Fricout},
title = {From Supervised to Reinforcement Learning: A Kernel-based {Bayesian} Filtering Framework},
journal = {International Journal on Advances in Software},
year = {2009},
volume = {2},
number = {1},
pages = {101--116},
url = {http://hal-supelec.archives-ouvertes.fr/hal-00429891/en/},
issn = {1942-2628},
abstract = {In a large number of applications, engineers have to estimate a function linked to the state of a dynamic system. To do so, a sequence of samples drawn from this unknown function is observed while the system is transiting from state to state and the problem is to generalize these observations to unvisited states. Several solutions can be envisioned among which regressing a family of parameterized functions so as to make it fit at best to the observed samples. This is the first problem addressed with the proposed kernel-based Bayesian filtering approach, which also allows quantifying uncertainty reduction occurring when acquiring more samples. Classical methods cannot handle the case where actual samples are not directly observable but only a non linear mapping of them is available, which happens when a special sensor has to be used or when solving the Bellman equation in order to control the system. However the approach proposed in this paper can be extended to this tricky case. Moreover, an application of this indirect function approximation scheme to reinforcement learning is presented. A set of experiments is also proposed in order to demonstrate the efficiency of this kernel-based Bayesian approach.}