We improve the theoretical analysis and empirical performance of algorithms for
the stochastic multi-armed bandit problem and the linear stochastic multi-armed
bandit problem. In particular, we show that a simple modification of Auer's UCB
algorithm (Auer, 2002) achieves, with high probability, constant regret. More
importantly, we modify and, consequently, improve the analysis of the algorithm
for the linear stochastic bandit problem studied by Auer (2002), Dani et al.
(2008), Rusmevichientong and Tsitsiklis (2010), and Li et al. (2010). Our
modification improves the regret bound by a logarithmic factor, though
experiments show a vast improvement. In both cases, the improvement stems from
the construction of smaller confidence sets. For their construction, we use a
novel tail inequality for vector-valued martingales.
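For context, the smaller confidence sets rest on the self-normalized tail
inequality for vector-valued martingales mentioned above. The following is a
sketch of its flavor, not a verbatim statement; it assumes the paper's standard
linear bandit setup, with conditionally R-sub-Gaussian noise \eta_t,
F_{t-1}-measurable actions X_t \in \mathbb{R}^d, martingale sum
S_t = \sum_{s \le t} \eta_s X_s, and design matrix
\bar{V}_t = V + \sum_{s \le t} X_s X_s^\top for a fixed positive definite V.
With probability at least 1 - \delta, simultaneously for all t \ge 0,

\[
  \|S_t\|_{\bar{V}_t^{-1}}^2 \;\le\; 2 R^2 \log\!\left(
    \frac{\det(\bar{V}_t)^{1/2} \, \det(V)^{-1/2}}{\delta}
  \right).
\]

Applied to the ridge regression estimate \hat{\theta}_t (regularizer \lambda I,
unknown parameter bounded as \|\theta_*\|_2 \le S), this yields an ellipsoidal
confidence set around \hat{\theta}_t of \bar{V}_t-norm radius roughly
R \sqrt{2 \log\big( \det(\bar{V}_t)^{1/2} \det(\lambda I)^{-1/2} / \delta \big)}
+ \lambda^{1/2} S, which is the construction behind the improved regret bound.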
@inproceedings{AYPSz11,
author = {Abbasi-Yadkori, Y. and P{\'a}l, D. and Szepesv{\'a}ri, {Cs}.},
booktitle = {NIPS},
keywords = {bandits, learning, linear, online, stochastic, theory},
month = {December},
pages = {2312--2320},
pdf = {papers/linear-bandits-NIPS2011.pdf},
title = {Improved Algorithms for Linear Stochastic Bandits (extended, corrected version)},
year = 2011
}