
PointContrast: Unsupervised Pre-training for 3D Point Cloud Understanding, Spotlight, ECCV 2020
Saining Xie, Jiatao Gu, Demi Guo, Charles R. Qi, Leonidas J. Guibas, Or Litany
Local contrastive learning for 3D representation learning. The representations learned without supervision generalize across tasks and improve several high-level semantic understanding problems, ranging from segmentation to detection, on six different datasets. A toy code sketch is included below.
paper / bibtex
@article{xie2020pointcontrast,
title={PointContrast: Unsupervised Pre-training for 3D Point Cloud Understanding},
author={Xie, Saining and Gu, Jiatao and Guo, Demi and Qi, Charles R and Guibas, Leonidas J and Litany, Or},
journal={arXiv preprint arXiv:2007.10985},
year={2020}
}
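
A minimal sketch of the idea above: an InfoNCE-style contrastive loss computed at the level of individual points, where matched points from two views of the same scene are positives and other sampled points serve as negatives. The function name, feature shapes, and temperature are illustrative assumptions, not the paper's exact PointInfoNCE implementation.

```python
import torch
import torch.nn.functional as F

def point_contrastive_loss(feats1, feats2, temperature=0.07):
    """Point-level InfoNCE-style loss (toy version).

    feats1, feats2: (N, C) features of N matched point pairs; row i of feats1
    and row i of feats2 come from corresponding points in two views of the
    same scene. The matched point is the positive; all other sampled points
    act as negatives.
    """
    feats1 = F.normalize(feats1, dim=1)
    feats2 = F.normalize(feats2, dim=1)
    logits = feats1 @ feats2.t() / temperature            # (N, N) similarities
    targets = torch.arange(feats1.shape[0], device=feats1.device)
    return F.cross_entropy(logits, targets)               # diagonal entries are positives

# toy usage with random features standing in for a 3D backbone's output
loss = point_contrastive_loss(torch.randn(128, 32), torch.randn(128, 32))
```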

ImVoteNet: Boosting 3D Object Detection in Point Clouds with Image Votes, CVPR 2020
Charles R. Qi*, Xinlei Chen*, Or Litany, Leonidas J. Guibas (*: equal contribution)
An extension of VoteNet that leverages RGB images. By lifting 2D image votes to 3D, RGB images provide strong geometric cues for 3D object localization and pose estimation, while their textures and colors provide semantic cues. A multi-tower training scheme also makes the 2D-3D feature fusion more effective. A toy code sketch is included below.
paper / bibtex / code
@article{qi2020imvotenet,
title={ImVoteNet: Boosting 3D Object Detection in Point Clouds with Image Votes},
author={Qi, Charles R and Chen, Xinlei and Litany, Or and Guibas, Leonidas J},
journal={arXiv preprint arXiv:2001.10692},
year={2020}
}
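
A minimal sketch of one way to lift a 2D image vote to a pseudo 3D vote, assuming for illustration that the object center lies at the seed point's depth; the paper's exact lifting and feature fusion are more involved. The function name, coordinate convention, and intrinsics below are hypothetical.

```python
import numpy as np

def lift_2d_vote(seed_xyz, vote_2d, K):
    """Lift a 2D image vote to a pseudo 3D vote for one seed point.

    seed_xyz : (3,) seed point in camera coordinates (x right, y down, z forward).
    vote_2d  : (2,) predicted pixel offset from the seed's projection to the
               object's 2D box center.
    K        : (3, 3) camera intrinsic matrix.

    Returns a (3,) pseudo 3D offset, obtained by placing the object center at
    the seed's depth along the ray through the voted 2D center (a simplifying
    assumption used here for illustration).
    """
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    x, y, z = seed_xyz
    u = fx * x / z + cx                      # project the seed to pixel coordinates
    v = fy * y / z + cy
    u2, v2 = u + vote_2d[0], v + vote_2d[1]  # voted 2D object center
    # back-project the voted center at the seed's depth
    center = np.array([(u2 - cx) * z / fx, (v2 - cy) * z / fy, z])
    return center - seed_xyz

K = np.array([[525.0, 0, 320.0], [0, 525.0, 240.0], [0, 0, 1.0]])
print(lift_2d_vote(np.array([0.5, 0.2, 3.0]), np.array([12.0, -4.0]), K))
```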

Deep Hough Voting for 3D Object Detection in Point Clouds, Oral Presentation, ICCV 2019
Charles R. Qi, Or Litany, Kaiming He, Leonidas J. Guibas
Best Paper Award Nomination (one of seven among 1,075 accepted papers) [link]
We revive generalized Hough voting in the era of deep learning for 3D object detection in point clouds. Our voting-based detection network (VoteNet) is both fast and top-performing. A toy code sketch is included below.
paper / bibtex / code / talk
@article{qi2019deep,
title={Deep Hough Voting for 3D Object Detection in Point Clouds},
author={Qi, Charles R and Litany, Or and He, Kaiming and Guibas, Leonidas J},
journal={arXiv preprint arXiv:1904.09664},
year={2019}
}
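
A toy illustration of the voting idea: each seed point casts a vote (an offset toward a predicted object center), and nearby votes are grouped into object proposals. The greedy sampling, grouping radius, and mean aggregation below are simplifications of what the learned VoteNet modules do; all names and numbers are illustrative.

```python
import numpy as np

def cluster_votes(seeds, offsets, radius=0.3, num_clusters=8):
    """Toy version of the voting-and-grouping stage.

    seeds   : (N, 3) seed points sampled from the input point cloud.
    offsets : (N, 3) per-seed offsets to the predicted object center,
              i.e. the Hough votes a network would regress.
    Votes near each other are grouped, and each group yields one proposal
    center (here a plain mean; the real network aggregates learned features).
    """
    votes = seeds + offsets
    # greedy farthest point sampling to pick well-spread cluster seeds
    picked = [0]
    dists = np.linalg.norm(votes - votes[0], axis=1)
    for _ in range(num_clusters - 1):
        picked.append(int(np.argmax(dists)))
        dists = np.minimum(dists, np.linalg.norm(votes - votes[picked[-1]], axis=1))
    proposals = []
    for idx in picked:
        group = votes[np.linalg.norm(votes - votes[idx], axis=1) < radius]
        proposals.append(group.mean(axis=0))
    return np.stack(proposals)

seeds = np.random.rand(256, 3)
offsets = 0.1 * np.random.randn(256, 3)
print(cluster_votes(seeds, offsets).shape)   # (8, 3)
```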

KPConv: Flexible and Deformable Convolution for Point Clouds, ICCV 2019
Hugues Thomas, Charles R. Qi, Jean-Emmanuel Deschaud, Beatriz Marcotegui, Francois Goulette, Leonidas J. Guibas
Proposed a point-centric approach to deep learning on 3D point clouds: kernel point convolution (KPConv), where a convolution kernel is defined as a set of spatially localized and deformable points. A toy code sketch is included below.
paper / bibtex / code
@article{thomas2019kpconv,
title={KPConv: Flexible and Deformable Convolution for Point Clouds},
author={Thomas, Hugues and Qi, Charles R and Deschaud, Jean-Emmanuel and Marcotegui, Beatriz and Goulette, Francois and Guibas, Leonidas J},
journal={arXiv preprint arXiv:1904.08889},
year={2019}
}
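
A minimal sketch of a rigid kernel point convolution at one query point, following the description above: each neighbor's feature is weighted by a linear correlation with a set of kernel points, each carrying its own weight matrix. The shapes and the correlation width sigma are illustrative assumptions; the deformable variant additionally predicts offsets for the kernel points.

```python
import numpy as np

def kpconv_point(center, neighbors, feats, kernel_pts, weights, sigma=0.3):
    """Rigid kernel point convolution at a single query point (toy version).

    center     : (3,) query point.
    neighbors  : (M, 3) neighbor points within the convolution radius.
    feats      : (M, C_in) neighbor features.
    kernel_pts : (K, 3) kernel point positions, defined relative to the center.
    weights    : (K, C_in, C_out) one weight matrix per kernel point.
    Kernel points play the role that grid cells play in image convolution.
    """
    rel = neighbors - center                                           # (M, 3)
    dist = np.linalg.norm(rel[:, None, :] - kernel_pts[None], axis=2)  # (M, K)
    corr = np.maximum(0.0, 1.0 - dist / sigma)                         # linear correlation
    # sum over neighbors (m), kernel points (k), and input channels (c)
    out = np.einsum('mk,mc,kcd->d', corr, feats, weights)
    return out                                                         # (C_out,)

kernel_pts = np.random.randn(15, 3) * 0.2
out = kpconv_point(np.zeros(3), np.random.randn(32, 3) * 0.3,
                   np.random.randn(32, 8), kernel_pts,
                   np.random.randn(15, 8, 16))
print(out.shape)   # (16,)
```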

Generating 3D Adversarial Point Clouds, CVPR 2019
Chong Xiang, Charles R. Qi, Bo Li
Proposed several novel algorithms to craft adversarial point clouds against 3D deep learning models, via adversarial point perturbation and adversarial point generation. A toy code sketch is included below.
paper / bibtex / code
@article{xiang2019adv,
title={Generating 3D Adversarial Point Clouds},
author={Xiang, Chong and Qi, Charles R and Li, Bo},
journal={Proc. Computer Vision and Pattern Recognition (CVPR), IEEE},
year={2019}
}
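
A toy example of the point-perturbation flavor of attack: gradient ascent on the classification loss with respect to per-point displacements, clipped to a small budget. The model here is a random stand-in and the loop is a basic PGD-style attack, not the paper's exact formulations or norm constraints.

```python
import torch

def perturb_point_cloud(model, points, label, eps=0.02, steps=10, lr=0.01):
    """Toy untargeted point-perturbation attack on a point cloud classifier.

    model  : callable mapping (1, N, 3) points to class logits.
    points : (N, 3) clean point cloud.
    label  : ground-truth class index.
    Perturbs point coordinates by gradient ascent on the classification loss,
    clipping the per-coordinate displacement to eps.
    """
    delta = torch.zeros_like(points, requires_grad=True)
    for _ in range(steps):
        logits = model((points + delta).unsqueeze(0))
        loss = torch.nn.functional.cross_entropy(logits, torch.tensor([label]))
        loss.backward()
        with torch.no_grad():
            delta += lr * delta.grad.sign()
            delta.clamp_(-eps, eps)        # keep the perturbation small
            delta.grad.zero_()
    return (points + delta).detach()

W = torch.randn(3, 40)                     # random stand-in for a trained classifier
adv = perturb_point_cloud(lambda x: x.mean(dim=1) @ W, torch.rand(1024, 3), label=5)
```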

FlowNet3D: Learning Scene Flow in 3D Point Clouds, CVPR 2019
Xingyu Liu*, Charles R. Qi*, Leonidas Guibas (*: equal contribution)
Proposed a novel deep neural network that learns scene flow from point clouds in an end-to-end fashion.
paper / bibtex / code
@article{liu2019flownet3d,
title={FlowNet3D: Learning Scene Flow in 3D Point Clouds},
author={Liu, Xingyu and Qi, Charles R and Guibas, Leonidas J},
journal={Proc. Computer Vision and Pattern Recognition (CVPR), IEEE},
year={2019}
}

Exploring Hidden Dimensions in Parallelizing Convolutional Neural Networks, ICML 2018
Zhihao Jia, Sina Lin, Charles R. Qi, Alex Aiken
We studied how to parallelize the training of deep convolutional networks beyond simple data or model parallelism, and proposed layer-wise parallelism, which allows each layer in a network to use an individual parallelization strategy. A toy code sketch is included below.
paper / bibtex
@article{jia2018exploring,
title={Exploring Hidden Dimensions in Parallelizing Convolutional Neural Networks},
author={Jia, Zhihao and Lin, Sina and Qi, Charles R and Aiken, Alex},
journal={arXiv preprint arXiv:1802.04924},
year={2018}
}
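
A toy illustration of what a per-layer parallelization plan looks like: every layer independently picks the strategy with the lowest estimated cost. The strategy names and cost numbers are made up for illustration; the paper builds a real cost model (including inter-layer communication) and searches the space globally rather than greedily.

```python
# Hypothetical per-layer cost model: for each layer, estimate the execution
# time of every candidate parallelization strategy and pick the cheapest.
LAYERS = ["conv1", "conv2", "conv3", "fc1", "fc2"]
STRATEGIES = ["data_parallel", "model_parallel", "hybrid"]

def estimated_cost(layer, strategy):
    # stand-in numbers; a real system would profile or model each choice
    conv_table = {"data_parallel": 1.0, "model_parallel": 1.3, "hybrid": 1.1}
    fc_table = {"data_parallel": 2.0, "model_parallel": 0.8, "hybrid": 1.0}
    return (fc_table if layer.startswith("fc") else conv_table)[strategy]

plan = {layer: min(STRATEGIES, key=lambda s: estimated_cost(layer, s))
        for layer in LAYERS}
print(plan)   # conv layers -> data_parallel, fc layers -> model_parallel
```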

Frustum PointNets for 3D Object Detection from RGB-D Data, CVPR 2018
Charles R. Qi, Wei Liu, Chenxia Wu, Hao Su, and Leonidas J. Guibas
Proposed a novel framework for 3D object detection with image region proposals (lifted to 3D frustums) and PointNets. Our method is simple, efficient and effective, ranking first on the KITTI 3D object detection benchmark across all categories (as of 11/27/2017). A toy code sketch is included below.
paper / bibtex / code / website
@article{qi2017frustum,
title={Frustum PointNets for 3D Object Detection from RGB-D Data},
author={Qi, Charles R and Liu, Wei and Wu, Chenxia and Su, Hao and Guibas, Leonidas J},
journal={Proc. Computer Vision and Pattern Recognition (CVPR), IEEE},
year={2018}
}
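
A minimal sketch of the frustum extraction step described above: the points whose image projection falls inside a 2D detection box form the frustum point cloud that is then passed to PointNets for 3D box estimation. The intrinsics are KITTI-like illustrative numbers and the coordinate convention assumed is camera x-right, y-down, z-forward.

```python
import numpy as np

def extract_frustum(points, box2d, K):
    """Collect the points inside the frustum of a 2D detection box.

    points : (N, 3) points in the camera frame.
    box2d  : (xmin, ymin, xmax, ymax) image-plane box from a 2D detector.
    K      : (3, 3) camera intrinsics.
    """
    z = points[:, 2]
    u = K[0, 0] * points[:, 0] / z + K[0, 2]   # project to pixel coordinates
    v = K[1, 1] * points[:, 1] / z + K[1, 2]
    xmin, ymin, xmax, ymax = box2d
    mask = (u >= xmin) & (u <= xmax) & (v >= ymin) & (v <= ymax) & (z > 0)
    return points[mask]

K = np.array([[721.5, 0, 609.6], [0, 721.5, 172.9], [0, 0, 1.0]])
pts = np.random.randn(5000, 3) * np.array([10, 2, 1]) + np.array([0, 0, 20])
print(extract_frustum(pts, (500, 100, 700, 250), K).shape)
```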

PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space, NIPS 2017
Charles R. Qi, Li Yi, Hao Su, and Leonidas J. Guibas
Proposed a hierarchical neural network on point sets that captures local context. Compared with PointNet, PointNet++ achieves better performance and generalizability in complex scenes and can handle non-uniform sampling density. A toy code sketch is included below.
paper / bibtex / code / website / poster
@article{qi2017pointnetplusplus,
title={PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space},
author={Qi, Charles R and Yi, Li and Su, Hao and Guibas, Leonidas J},
journal={arXiv preprint arXiv:1706.02413},
year={2017}
}
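
A toy sketch of one set-abstraction level, the building block that gives PointNet++ its local context: sample centroids with farthest point sampling, group their neighbors with a ball query, and summarize each group with a symmetric max. The identity feature transform here stands in for the mini-PointNet the real network applies to each group; names and sizes are illustrative.

```python
import numpy as np

def set_abstraction(xyz, feats, num_centroids=128, radius=0.2):
    """One toy set-abstraction level: sample, group, and summarize locally.

    xyz : (N, 3) point coordinates, feats : (N, C) point features.
    """
    # 1) farthest point sampling picks well-spread centroids
    picked = [0]
    d = np.linalg.norm(xyz - xyz[0], axis=1)
    for _ in range(num_centroids - 1):
        picked.append(int(np.argmax(d)))
        d = np.minimum(d, np.linalg.norm(xyz - xyz[picked[-1]], axis=1))
    new_xyz = xyz[picked]
    # 2) ball query groups neighbors; 3) max pooling summarizes each region
    new_feats = []
    for c in new_xyz:
        mask = np.linalg.norm(xyz - c, axis=1) < radius
        group = feats[mask] if mask.any() else feats[:1]
        new_feats.append(group.max(axis=0))
    return new_xyz, np.stack(new_feats)

new_xyz, new_feats = set_abstraction(np.random.rand(1024, 3), np.random.rand(1024, 16))
print(new_xyz.shape, new_feats.shape)   # (128, 3) (128, 16)
```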

PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation, Oral Presentation, CVPR 2017
Charles R. Qi*, Hao Su*, Kaichun Mo, and Leonidas J. Guibas (*: equal contribution)
Proposed novel neural networks that directly consume an unordered point cloud as input, without first converting it to other 3D representations such as voxel grids. Rich theoretical and empirical analyses are provided. A toy code sketch is included below.
paper / bibtex / code / website / presentation video
@article{qi2017pointnet,
title={PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation},
author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},
journal={Proc. Computer Vision and Pattern Recognition (CVPR), IEEE},
year={2017}
}
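
A stripped-down sketch of the core idea: a shared per-point MLP followed by a symmetric max pooling, which makes the output invariant to the order of the input points. The real PointNet adds input/feature alignment networks (T-Nets) and segmentation branches; the layer sizes here are illustrative.

```python
import torch
import torch.nn as nn

class TinyPointNet(nn.Module):
    """Shared per-point MLP + order-invariant max pooling + classifier head."""
    def __init__(self, num_classes=40):
        super().__init__()
        self.point_mlp = nn.Sequential(
            nn.Linear(3, 64), nn.ReLU(),
            nn.Linear(64, 128), nn.ReLU(),
            nn.Linear(128, 1024))
        self.head = nn.Sequential(
            nn.Linear(1024, 256), nn.ReLU(),
            nn.Linear(256, num_classes))

    def forward(self, points):                    # points: (B, N, 3), any point order
        per_point = self.point_mlp(points)        # (B, N, 1024)
        global_feat = per_point.max(dim=1).values # symmetric, order-invariant pooling
        return self.head(global_feat)             # (B, num_classes)

logits = TinyPointNet()(torch.rand(2, 1024, 3))
print(logits.shape)   # torch.Size([2, 40])
```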

Shape Completion using 3D-Encoder-Predictor CNNs and Shape Synthesis, Spotlight Presentation, CVPR 2017
Angela Dai, Charles R. Qi, Matthias Niessner
A data-driven approach to complete partial 3D shapes through a combination of volumetric deep neural networks and 3D shape synthesis.
paper / bibtex / website (code & data available)
@article{dai2017complete,
title={Shape Completion using 3D-Encoder-Predictor CNNs and Shape Synthesis},
author={Dai, Angela and Qi, Charles Ruizhongtai and Nie{\ss}ner, Matthias},
journal={Proc. Computer Vision and Pattern Recognition (CVPR), IEEE},
year={2017}
}

Volumetric and Multi-View CNNs for Object Classification on 3D Data, Spotlight Presentation, CVPR 2016
Charles R. Qi*, Hao Su*, Matthias Niessner, Angela Dai, Mengyuan Yan, and Leonidas J. Guibas (*: equal contribution)
Novel architectures for 3D CNNs that take volumetric or multi-view representations as input. A toy code sketch is included below.
paper / bibtex / code / website / supp / presentation video
@inproceedings{qi2016volumetric,
author = {Charles Ruizhongtai Qi and Hao Su and Matthias Nie{\ss}ner and
Angela Dai and Mengyuan Yan and Leonidas Guibas},
title = {Volumetric and Multi-View CNNs for Object Classification on 3D Data},
booktitle = {Proc. Computer Vision and Pattern Recognition (CVPR), IEEE},
year = {2016}
}
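
For the multi-view side, a generic sketch of the standard multi-view pipeline such architectures build on: every rendered view passes through the same 2D CNN, and the per-view features are merged with an elementwise max before classification. This is a common baseline pattern, not the paper's exact architecture; all layer sizes are illustrative.

```python
import torch
import torch.nn as nn

class TinyMultiViewNet(nn.Module):
    """Shared 2D CNN per rendered view + cross-view max pooling + classifier."""
    def __init__(self, num_classes=40):
        super().__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(1, 16, 5, stride=2), nn.ReLU(),
            nn.Conv2d(16, 32, 5, stride=2), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten())
        self.fc = nn.Linear(32, num_classes)

    def forward(self, views):                           # views: (B, V, 1, H, W)
        b, v = views.shape[:2]
        feats = self.cnn(views.flatten(0, 1))           # (B*V, 32), shared weights
        feats = feats.view(b, v, -1).max(dim=1).values  # pool across views
        return self.fc(feats)

print(TinyMultiViewNet()(torch.rand(2, 12, 1, 64, 64)).shape)  # (2, 40)
```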

FPNN: Field Probing Neural Networks for 3D Data, NIPS 2016
Yangyan Li, Soeren Pirk, Hao Su, Charles R. Qi, and Leonidas J. Guibas
A very efficient 3D deep learning method for volumetric data processing that takes advantage of data sparsity in 3D fields. A toy code sketch is included below.
paper / bibtex / code / website
@article{li2016fpnn,
title={FPNN: Field Probing Neural Networks for 3D Data},
author={Li, Yangyan and Pirk, Soeren and Su, Hao and Qi, Charles R and Guibas, Leonidas J},
journal={arXiv preprint arXiv:1605.06240},
year={2016}
}
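
A toy sketch of the field-probing idea: each filter reads the 3D field (e.g., a distance field of the shape) at only a handful of sampling locations and takes a weighted sum, so the cost does not grow with the empty portion of the volume. The nearest-neighbor sampling and shapes below are simplifications; in FPNN the sampling locations are learned and the layers are differentiable.

```python
import numpy as np

def field_probing(field, probe_points, probe_weights):
    """Toy field-probing response (simplified from the paper's layers).

    field         : (D, D, D) scalar field on the unit cube, e.g. a distance field.
    probe_points  : (F, P, 3) P sampling locations in [0, 1]^3 for each of F filters.
    probe_weights : (F, P) one weight per sampling location.
    """
    d = field.shape[0]
    idx = np.clip((probe_points * (d - 1)).round().astype(int), 0, d - 1)
    samples = field[idx[..., 0], idx[..., 1], idx[..., 2]]   # (F, P) field values
    return (samples * probe_weights).sum(axis=1)             # (F,) filter responses

field = np.random.rand(32, 32, 32)
resp = field_probing(field, np.random.rand(8, 16, 3), np.random.randn(8, 16))
print(resp.shape)   # (8,)
```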

Joint Embeddings of Shapes and Images via CNN Image Purification, SIGGRAPH Asia 2015
Yangyan Li*, Hao Su*, Charles R. Qi, Noa Fish, Daniel Cohen-Or, and Leonidas J. Guibas (*: equal contribution)
Cross-modality learning of 3D shapes and 2D images by neural networks. A joint embedding space that is sensitive to 3D geometry differences but agnostic to other nuisances is constructed.
paper / bibtex / code / website / live demo
@article{li2015jointembedding,
Author = {Li, Yangyan and Su, Hao and Qi, Charles Ruizhongtai and Fish, Noa
and Cohen-Or, Daniel and Guibas, Leonidas J.},
Title = {Joint Embeddings of Shapes and Images via CNN Image Purification},
Journal = {ACM Trans. Graph.},
Year = {2015}
}

Render for CNN: Viewpoint Estimation in Images Using CNNs Trained with Rendered 3D Model Views, Oral Presentation, ICCV 2015
Hao Su*, Charles R. Qi*, Yangyan Li, Leonidas J. Guibas (*equal contribution)
Pioneering work showing that large-scale synthetic data rendered from virtual worlds can greatly benefit deep learning in the real world. Delivers a state-of-the-art viewpoint estimator.
paper / bibtex / code / website / presentation video
@InProceedings{Su_2015_ICCV,
Title={Render for CNN: Viewpoint Estimation in Images Using CNNs Trained with Rendered 3D Model Views},
Author={Su, Hao and Qi, Charles R. and Li, Yangyan and Guibas, Leonidas J.},
Booktitle={The IEEE International Conference on Computer Vision (ICCV)},
month = {December},
Year= {2015}
}