Many applications in robotics and human-computer interaction can benefit from
an understanding of 3D motion of points in a dynamic environment, widely noted
as scene flow. While most previous methods focus on solving the problem with
stereo and RGB-D images, few try to estimate scene flow directly from point
clouds. In this work, we propose a novel deep neural network named
FlowNet3D that learns scene flow from point clouds in an end-to-end
fashion. Our network simultaneously learns deep hierarchical point cloud
features, flow embeddings as well as how to smooth the output. We evaluate the
network on both challenging synthetic data and real LiDAR scans from KITTI.
Trained on synthetic data only, our network is able to generalize well to real
scans. Benefiting from learning directly in point clouds, our model achieved
significantly more accurate scene flow results compared with various baselines
on stereo images and RGB-D images.
Description
[1806.01411] Learning Scene Flow in 3D Point Clouds
%0 Generic
%1 liu2018learning
%A Liu, Xingyu
%A Qi, Charles R.
%A Guibas, Leonidas J.
%D 2018
%K 2018 arxiv computer-vision paper point-cloud stanford
%T Learning Scene Flow in 3D Point Clouds
%U http://arxiv.org/abs/1806.01411
%X Many applications in robotics and human-computer interaction can benefit from
an understanding of 3D motion of points in a dynamic environment, widely noted
as scene flow. While most previous methods focus on solving the problem with
stereo and RGB-D images, few try to estimate scene flow directly from point
clouds. In this work, we propose a novel deep neural network named
FlowNet3D that learns scene flow from point clouds in an end-to-end
fashion. Our network simultaneously learns deep hierarchical point cloud
features, flow embeddings as well as how to smooth the output. We evaluate the
network on both challenging synthetic data and real LiDAR scans from KITTI.
Trained on synthetic data only, our network is able to generalize well to real
scans. Benefiting from learning directly in point clouds, our model achieved
significantly more accurate scene flow results compared with various baselines
on stereo images and RGB-D images.
@misc{liu2018learning,
  abstract      = {Many applications in robotics and human-computer interaction can benefit from
an understanding of 3D motion of points in a dynamic environment, widely noted
as scene flow. While most previous methods focus on solving the problem with
stereo and RGB-D images, few try to estimate scene flow directly from point
clouds. In this work, we propose a novel deep neural network named
{FlowNet3D} that learns scene flow from point clouds in an end-to-end
fashion. Our network simultaneously learns deep hierarchical point cloud
features, flow embeddings as well as how to smooth the output. We evaluate the
network on both challenging synthetic data and real LiDAR scans from KITTI.
Trained on synthetic data only, our network is able to generalize well to real
scans. Benefiting from learning directly in point clouds, our model achieved
significantly more accurate scene flow results compared with various baselines
on stereo images and RGB-D images.},
  added-at      = {2018-07-20T10:06:25.000+0200},
  archiveprefix = {arXiv},
  author        = {Liu, Xingyu and Qi, Charles R. and Guibas, Leonidas J.},
  biburl        = {https://www.bibsonomy.org/bibtex/2e8668602109c96be87e363a08b274b45/analyst},
  description   = {[1806.01411] Learning Scene Flow in 3D Point Clouds},
  eprint        = {1806.01411},
  interhash     = {3d4484f52729ee66ea5160a143f763c0},
  intrahash     = {e8668602109c96be87e363a08b274b45},
  keywords      = {2018 arxiv computer-vision paper point-cloud stanford},
  note          = {cite arxiv:1806.01411},
  timestamp     = {2018-07-20T10:06:25.000+0200},
  title         = {Learning Scene Flow in {3D} Point Clouds},
  url           = {http://arxiv.org/abs/1806.01411},
  year          = {2018},
}