The Variational Autoencoder (VAE) has proven to be an effective model for
producing semantically meaningful latent representations for natural data.
However, it has thus far seen limited application to sequential data, and, as
we demonstrate, existing recurrent VAE models have difficulty modeling
sequences with long-term structure. To address this issue, we propose the use
of a hierarchical decoder, which first outputs embeddings for subsequences of
the input and then uses these embeddings to generate each subsequence
independently. This structure encourages the model to utilize its latent code,
thereby avoiding the "posterior collapse" problem which remains an issue for
recurrent VAEs. We apply this architecture to modeling sequences of musical
notes and find that it exhibits dramatically better sampling, interpolation,
and reconstruction performance than a "flat" baseline model. An implementation
of our "MusicVAE" is available online at http://g.co/magenta/musicvae-colab.
Description
[1803.05428] A Hierarchical Latent Vector Model for Learning Long-Term Structure in Music
%0 Generic
%1 roberts2018hierarchical
%A Roberts, Adam
%A Engel, Jesse
%A Raffel, Colin
%A Hawthorne, Curtis
%A Eck, Douglas
%D 2018
%K autoencoder long-term music
%T A Hierarchical Latent Vector Model for Learning Long-Term Structure in
Music
%U http://arxiv.org/abs/1803.05428
%X The Variational Autoencoder (VAE) has proven to be an effective model for
producing semantically meaningful latent representations for natural data.
However, it has thus far seen limited application to sequential data, and, as
we demonstrate, existing recurrent VAE models have difficulty modeling
sequences with long-term structure. To address this issue, we propose the use
of a hierarchical decoder, which first outputs embeddings for subsequences of
the input and then uses these embeddings to generate each subsequence
independently. This structure encourages the model to utilize its latent code,
thereby avoiding the "posterior collapse" problem which remains an issue for
recurrent VAEs. We apply this architecture to modeling sequences of musical
notes and find that it exhibits dramatically better sampling, interpolation,
and reconstruction performance than a "flat" baseline model. An implementation
of our "MusicVAE" is available online at http://g.co/magenta/musicvae-colab.
@misc{roberts2018hierarchical,
  abstract      = {The Variational Autoencoder (VAE) has proven to be an effective model for
producing semantically meaningful latent representations for natural data.
However, it has thus far seen limited application to sequential data, and, as
we demonstrate, existing recurrent VAE models have difficulty modeling
sequences with long-term structure. To address this issue, we propose the use
of a hierarchical decoder, which first outputs embeddings for subsequences of
the input and then uses these embeddings to generate each subsequence
independently. This structure encourages the model to utilize its latent code,
thereby avoiding the ``posterior collapse'' problem which remains an issue for
recurrent VAEs. We apply this architecture to modeling sequences of musical
notes and find that it exhibits dramatically better sampling, interpolation,
and reconstruction performance than a ``flat'' baseline model. An implementation
of our ``MusicVAE'' is available online at http://g.co/magenta/musicvae-colab.},
  added-at      = {2018-03-16T15:06:47.000+0100},
  archiveprefix = {arXiv},
  author        = {Roberts, Adam and Engel, Jesse and Raffel, Colin and Hawthorne, Curtis and Eck, Douglas},
  biburl        = {https://www.bibsonomy.org/bibtex/2078056c332d91c311a0b8ccfe2ca4c10/rcb},
  description   = {[1803.05428] A Hierarchical Latent Vector Model for Learning Long-Term Structure in Music},
  eprint        = {1803.05428},
  interhash     = {e1a064945728c7552e2317213a51c420},
  intrahash     = {078056c332d91c311a0b8ccfe2ca4c10},
  keywords      = {autoencoder long-term music},
  timestamp     = {2018-03-16T15:06:47.000+0100},
  title         = {A Hierarchical Latent Vector Model for Learning Long-Term Structure in
Music},
  url           = {http://arxiv.org/abs/1803.05428},
  year          = {2018}
}