@inproceedings{Supelec246,
  author    = {Pietquin, Olivier},
  title     = {Learning to Ground in Spoken Dialogue Systems},
  booktitle = {Proceedings of the 32nd {IEEE} International Conference on Acoustics, Speech and Signal Processing ({ICASSP} 2007)},
  year      = {2007},
  volume    = {IV},
  pages     = {165--168},
  month     = apr,
  address   = {Honolulu (Hawaii, USA)},
  url       = {http://hal-supelec.archives-ouvertes.fr/hal-00213410/fr/},
  abstract  = {Machine learning methods such as reinforcement learning applied to dialogue strategy optimization has become a leading subject of researches since the mid 90's. Indeed, the great variability of factors to take into account makes the design of a spoken dialogue system a tailoring task and reusability of previous work is very difficult. Yet, techniques such as reinforcement learning are very demanding in training data while obtaining a substantial amount of data in the particular case of spoken dialogues is time-consuming and therefore expensive. In order to expand existing data sets, dialogue simulation techniques are becoming a standard solution. In this paper, we present a user model for realistic spoken dialogue simulation and a method for using this model so as to simulate the grounding process. This allows including grounding subdialogues as actions in the reinforcement learning process and learning adapted strategy.},
}