A growing body of research has shown that many classifiers are susceptible to
adversarial examples -- small strategic modifications to test inputs
that lead to misclassification. In this work, we study general non-parametric
methods, with a view towards understanding when they are robust to these
modifications. We establish general conditions under which non-parametric
methods are r-consistent -- in the sense that they converge to optimally robust
and accurate classifiers in the large sample limit.
Concretely, our results show that when data is well-separated, nearest
neighbors and kernel classifiers are r-consistent, while histograms are not.
For general data distributions, we prove that preprocessing by Adversarial
Pruning (Yang et al., 2019) -- that makes data well-separated -- followed by
nearest neighbors or kernel classifiers also leads to r-consistency.
Description
[2003.06121] When are Non-Parametric Methods Robust?
%0 Journal Article
%1 bhattacharjee2020nonparametric
%A Bhattacharjee, Robi
%A Chaudhuri, Kamalika
%D 2020
%K bayesian generative-models non-parametric readings robustness
%T When are Non-Parametric Methods Robust?
%U http://arxiv.org/abs/2003.06121
%X A growing body of research has shown that many classifiers are susceptible to
adversarial examples -- small strategic modifications to test inputs
that lead to misclassification. In this work, we study general non-parametric
methods, with a view towards understanding when they are robust to these
modifications. We establish general conditions under which non-parametric
methods are r-consistent -- in the sense that they converge to optimally robust
and accurate classifiers in the large sample limit.
Concretely, our results show that when data is well-separated, nearest
neighbors and kernel classifiers are r-consistent, while histograms are not.
For general data distributions, we prove that preprocessing by Adversarial
Pruning (Yang et al., 2019) -- that makes data well-separated -- followed by
nearest neighbors or kernel classifiers also leads to r-consistency.
% arXiv preprint, not a published journal article: the original @article had no
% `journal` field (a required-field warning under standard styles) and buried the
% arXiv identifier in `note`. Recorded here as @misc with explicit eprint fields;
% citation key and all provenance fields (biburl/interhash/intrahash/timestamps)
% are unchanged so existing \cite commands and BibSonomy round-trips still work.
@misc{bhattacharjee2020nonparametric,
  abstract      = {A growing body of research has shown that many classifiers are susceptible to
\emph{adversarial examples} -- small strategic modifications to test inputs
that lead to misclassification. In this work, we study general non-parametric
methods, with a view towards understanding when they are robust to these
modifications. We establish general conditions under which non-parametric
methods are r-consistent -- in the sense that they converge to optimally robust
and accurate classifiers in the large sample limit.
Concretely, our results show that when data is well-separated, nearest
neighbors and kernel classifiers are r-consistent, while histograms are not.
For general data distributions, we prove that preprocessing by Adversarial
Pruning (Yang et al., 2019) -- that makes data well-separated -- followed by
nearest neighbors or kernel classifiers also leads to r-consistency.},
  added-at      = {2020-07-16T10:44:54.000+0200},
  archiveprefix = {arXiv},
  author        = {Bhattacharjee, Robi and Chaudhuri, Kamalika},
  biburl        = {https://www.bibsonomy.org/bibtex/26902d2bcc30ece1c56e0114a05582a8b/kirk86},
  description   = {[2003.06121] When are Non-Parametric Methods Robust?},
  eprint        = {2003.06121},
  interhash     = {0d97e53739604a860f3dae1687621e5f},
  intrahash     = {6902d2bcc30ece1c56e0114a05582a8b},
  keywords      = {bayesian generative-models non-parametric readings robustness},
  note          = {Under review for ICML 2020},
  timestamp     = {2020-07-16T10:44:54.000+0200},
  title         = {When are Non-Parametric Methods Robust?},
  url           = {http://arxiv.org/abs/2003.06121},
  year          = {2020}
}