Scientists and engineers are often interested in learning the number of
subpopulations (or components) present in a data set. Practitioners commonly
use a Dirichlet process mixture model (DPMM) for this purpose; in particular,
they count the number of clusters---i.e. components containing at least one
data point---in the DPMM posterior. But Miller and Harrison (2013) warn that
the DPMM cluster-count posterior is severely inconsistent for the number of
latent components when the data are truly generated from a finite mixture; that
is, the cluster-count posterior probability on the true generating number of
components goes to zero in the limit of infinite data. A potential alternative
is to use a finite mixture model (FMM) with a prior on the number of
components. Past work has shown the resulting FMM component-count posterior is
consistent. But existing results crucially depend on the assumption that the
component likelihoods are perfectly specified. In practice, this assumption is
unrealistic, and empirical evidence (Miller and Dunson, 2019) suggests that the
FMM posterior on the number of components is sensitive to the likelihood
choice. In this paper, we add rigor to data-analysis folk wisdom by proving
that under even the slightest model misspecification, the FMM posterior on the
number of components is ultraseverely inconsistent: for any finite $k \in
\mathbb{N}$, the posterior probability that the number of components is $k$
converges to 0 in the limit of infinite data. We illustrate practical
consequences of our theory on simulated and real data sets.
Description
[2007.04470] Finite mixture models are typically inconsistent for the number of components
%0 Journal Article
%1 cai2020finite
%A Cai, Diana
%A Campbell, Trevor
%A Broderick, Tamara
%D 2020
%K bayesian mixture-models probability readings
%T Finite mixture models are typically inconsistent for the number of
components
%U http://arxiv.org/abs/2007.04470
%X Scientists and engineers are often interested in learning the number of
subpopulations (or components) present in a data set. Practitioners commonly
use a Dirichlet process mixture model (DPMM) for this purpose; in particular,
they count the number of clusters---i.e. components containing at least one
data point---in the DPMM posterior. But Miller and Harrison (2013) warn that
the DPMM cluster-count posterior is severely inconsistent for the number of
latent components when the data are truly generated from a finite mixture; that
is, the cluster-count posterior probability on the true generating number of
components goes to zero in the limit of infinite data. A potential alternative
is to use a finite mixture model (FMM) with a prior on the number of
components. Past work has shown the resulting FMM component-count posterior is
consistent. But existing results crucially depend on the assumption that the
component likelihoods are perfectly specified. In practice, this assumption is
unrealistic, and empirical evidence (Miller and Dunson, 2019) suggests that the
FMM posterior on the number of components is sensitive to the likelihood
choice. In this paper, we add rigor to data-analysis folk wisdom by proving
that under even the slightest model misspecification, the FMM posterior on the
number of components is ultraseverely inconsistent: for any finite $k \in
\mathbb{N}$, the posterior probability that the number of components is $k$
converges to 0 in the limit of infinite data. We illustrate practical
consequences of our theory on simulated and real data sets.
@article{cai2020finite,
  abstract      = {Scientists and engineers are often interested in learning the number of
subpopulations (or components) present in a data set. Practitioners commonly
use a Dirichlet process mixture model (DPMM) for this purpose; in particular,
they count the number of clusters---i.e. components containing at least one
data point---in the DPMM posterior. But Miller and Harrison (2013) warn that
the DPMM cluster-count posterior is severely inconsistent for the number of
latent components when the data are truly generated from a finite mixture; that
is, the cluster-count posterior probability on the true generating number of
components goes to zero in the limit of infinite data. A potential alternative
is to use a finite mixture model (FMM) with a prior on the number of
components. Past work has shown the resulting FMM component-count posterior is
consistent. But existing results crucially depend on the assumption that the
component likelihoods are perfectly specified. In practice, this assumption is
unrealistic, and empirical evidence (Miller and Dunson, 2019) suggests that the
FMM posterior on the number of components is sensitive to the likelihood
choice. In this paper, we add rigor to data-analysis folk wisdom by proving
that under even the slightest model misspecification, the FMM posterior on the
number of components is ultraseverely inconsistent: for any finite $k \in
\mathbb{N}$, the posterior probability that the number of components is $k$
converges to 0 in the limit of infinite data. We illustrate practical
consequences of our theory on simulated and real data sets.},
  added-at      = {2020-07-16T11:56:56.000+0200},
  archiveprefix = {arXiv},
  author        = {Cai, Diana and Campbell, Trevor and Broderick, Tamara},
  biburl        = {https://www.bibsonomy.org/bibtex/2bb3c049f0e4b1741e12209626f43660f/kirk86},
  description   = {[2007.04470] Finite mixture models are typically inconsistent for the number of components},
  eprint        = {2007.04470},
  interhash     = {495399bfd3b51a1c5d5cca51d6f6cb6e},
  intrahash     = {bb3c049f0e4b1741e12209626f43660f},
  keywords      = {bayesian mixture-models probability readings},
  note          = {16 pages, 1 figure},
  timestamp     = {2020-07-16T11:56:56.000+0200},
  title         = {Finite mixture models are typically inconsistent for the number of
components},
  url           = {http://arxiv.org/abs/2007.04470},
  year          = {2020}
}