Self-supervised representation learning methods aim to provide powerful deep
feature learning without the requirement of large annotated datasets, thus
alleviating the annotation bottleneck that is one of the main barriers to
practical deployment of deep learning today. These methods have advanced
rapidly in recent years, with their efficacy approaching and sometimes
surpassing fully supervised pre-training alternatives across a variety of data
modalities including image, video, sound, text and graphs. This article
introduces this vibrant area including key concepts, the four main families of
approach and associated state of the art, and how self-supervised methods are
applied to diverse modalities of data. We further discuss practical
considerations including workflows, representation transferability, and compute
cost. Finally, we survey the major open challenges in the field that provide
fertile ground for future work.
%0 Generic
%1 ericsson2021selfsupervised
%A Ericsson, Linus
%A Gouk, Henry
%A Loy, Chen Change
%A Hospedales, Timothy M.
%D 2021
%K representationlearning todo:read
%R 10.1109/MSP.2021.3134634
%T Self-Supervised Representation Learning: Introduction, Advances and
Challenges
%U http://arxiv.org/abs/2110.09327
%X Self-supervised representation learning methods aim to provide powerful deep
feature learning without the requirement of large annotated datasets, thus
alleviating the annotation bottleneck that is one of the main barriers to
practical deployment of deep learning today. These methods have advanced
rapidly in recent years, with their efficacy approaching and sometimes
surpassing fully supervised pre-training alternatives across a variety of data
modalities including image, video, sound, text and graphs. This article
introduces this vibrant area including key concepts, the four main families of
approach and associated state of the art, and how self-supervised methods are
applied to diverse modalities of data. We further discuss practical
considerations including workflows, representation transferability, and compute
cost. Finally, we survey the major open challenges in the field that provide
fertile ground for future work.
@misc{ericsson2021selfsupervised,
  abstract      = {Self-supervised representation learning methods aim to provide powerful deep
feature learning without the requirement of large annotated datasets, thus
alleviating the annotation bottleneck that is one of the main barriers to
practical deployment of deep learning today. These methods have advanced
rapidly in recent years, with their efficacy approaching and sometimes
surpassing fully supervised pre-training alternatives across a variety of data
modalities including image, video, sound, text and graphs. This article
introduces this vibrant area including key concepts, the four main families of
approach and associated state of the art, and how self-supervised methods are
applied to diverse modalities of data. We further discuss practical
considerations including workflows, representation transferability, and compute
cost. Finally, we survey the major open challenges in the field that provide
fertile ground for future work.},
  added-at      = {2023-10-25T11:40:53.000+0200},
  archiveprefix = {arXiv},
  author        = {Ericsson, Linus and Gouk, Henry and Loy, Chen Change and Hospedales, Timothy M.},
  biburl        = {https://www.bibsonomy.org/bibtex/2a56129daba5372dec167976b9c358f85/annakrause},
  description   = {2110.09327.pdf},
  doi           = {10.1109/MSP.2021.3134634},
  eprint        = {2110.09327},
  interhash     = {5928d67f0fcad83cb349fbc698091241},
  intrahash     = {a56129daba5372dec167976b9c358f85},
  keywords      = {representationlearning todo:read},
  timestamp     = {2023-10-25T11:40:53.000+0200},
  title         = {Self-Supervised Representation Learning: Introduction, Advances and Challenges},
  url           = {http://arxiv.org/abs/2110.09327},
  year          = {2021},
}