{ "id": "2008.07519", "version": "v1", "published": "2020-08-17T17:58:26.000Z", "updated": "2020-08-17T17:58:26.000Z", "title": "V2VNet: Vehicle-to-Vehicle Communication for Joint Perception and Prediction", "authors": [ "Tsun-Hsuan Wang", "Sivabalan Manivasagam", "Ming Liang", "Bin Yang", "Wenyuan Zeng", "James Tu", "Raquel Urtasun" ], "comment": "ECCV 2020 (Oral)", "categories": [ "cs.CV" ], "abstract": "In this paper, we explore the use of vehicle-to-vehicle (V2V) communication to improve the perception and motion forecasting performance of self-driving vehicles. By intelligently aggregating the information received from multiple nearby vehicles, we can observe the same scene from different viewpoints. This allows us to see through occlusions and detect actors at long range, where the observations are very sparse or non-existent. We also show that our approach of sending compressed deep feature map activations achieves high accuracy while satisfying communication bandwidth requirements.", "revisions": [ { "version": "v1", "updated": "2020-08-17T17:58:26.000Z" } ], "analyses": { "keywords": [ "joint perception", "vehicle-to-vehicle communication", "compressed deep feature map", "deep feature map activations achieves", "feature map activations achieves high" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }