In this paper, we consider planning in stochastic shortest path problems, a subclass of Markov Decision Problems (MDP). We focus on medium-size problems whose state space can be fully enumerated. This problem has numerous important applications, such as navigation and planning under uncertainty. We propose a new approach for constructing a multi-level hierarchy of progressively simpler abstractions of the original problem. Once computed, the hierarchy can be used to speed up planning by first finding a policy for the most abstract level and then recursively refining it into a solution to the original problem. This approach is fully automated and delivers a speed-up of two orders of magnitude over a state-of-the-art MDP solver on sample problems while returning near-optimal solutions.
%0 Conference Paper
%1 a.isaza2008
%A Isaza, A.
%A Szepesvári, Cs.
%A Bulitko, V.
%A Greiner, R.
%B UAI
%D 2008
%K MDPs, abstraction, finite learning, macro options path planning, problem, shortest
%P 306--314
%T Speeding Up Planning in Markov Decision Processes via Automatically Constructed Abstractions
%X In this paper, we consider planning in stochastic shortest path problems, a subclass of Markov Decision Problems (MDP). We focus on medium-size problems whose state space can be fully enumerated. This problem has numerous important applications, such as navigation and planning under uncertainty. We propose a new approach for constructing a multi-level hierarchy of progressively simpler abstractions of the original problem. Once computed, the hierarchy can be used to speed up planning by first finding a policy for the most abstract level and then recursively refining it into a solution to the original problem. This approach is fully automated and delivers a speed-up of two orders of magnitude over a state-of-the-art MDP solver on sample problems while returning near-optimal solutions.
@inproceedings{a.isaza2008,
  abstract      = {In this paper, we consider planning in stochastic shortest path problems, a subclass of Markov Decision Problems (MDP). We focus on medium-size problems whose state space can be fully enumerated. This problem has numerous important applications, such as navigation and planning under uncertainty. We propose a new approach for constructing a multi-level hierarchy of progressively simpler abstractions of the original problem. Once computed, the hierarchy can be used to speed up planning by first finding a policy for the most abstract level and then recursively refining it into a solution to the original problem. This approach is fully automated and delivers a speed-up of two orders of magnitude over a state-of-the-art MDP solver on sample problems while returning near-optimal solutions.},
  added-at      = {2020-03-17T03:03:01.000+0100},
  author        = {Isaza, A. and Szepesv{\'a}ri, {Cs}. and Bulitko, V. and Greiner, R.},
  biburl        = {https://www.bibsonomy.org/bibtex/2e9d6a1818e8d98bfb560606151653975/csaba},
  booktitle     = {{UAI}},
  date-modified = {2010-11-25 00:53:43 -0700},
  interhash     = {2a529870a7ba18a2ed2add0a7ca777d1},
  intrahash     = {e9d6a1818e8d98bfb560606151653975},
  keywords      = {MDPs, abstraction, finite learning, macro options path planning, problem, shortest},
  owner         = {Beata},
  pages         = {306--314},
  pdf           = {papers/prmdp.pdf},
  timestamp     = {2020-03-17T03:03:01.000+0100},
  title         = {Speeding Up Planning in {Markov} Decision Processes via Automatically Constructed Abstractions},
  year          = {2008},
}