main.js
const main = {
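// Site content for the MARS Lab homepage: banners, research directions, lab members,
// featured projects and the publication list. Judging by the migration note below,
// this object is presumably consumed by the Vue components in THU-MARS-Web; that
// wiring is not shown in this file.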
title: 'Our Missions are Beyond Mars',
// about_our_lab: migrated to THU-MARS-Web/about_our_lab.vue
banner: [
'banner_human_parkour.png',
'banner_drivevlm.png',
'banner_lcm.jpg',
'banner_robot.jpg',
'VCAD.jpg',
'banner1.jpg',
'banner2.jpg',
],
overview_of_research_directions: [
{
title: 'Multimedia Computing',
content: 'We train AI models that understand and generate multi-modal data such as images, audio, video and text.',
image: 'multimedia.png'
},
{
title: 'Autonomous Driving',
content: 'We develop the next-generation autonomous driving software stack: from visual scene understanding to neural planning.',
image: 'autonomous_vehicles.png'
},
{
title: 'Robotics',
content: 'We make robots that learn from multiple sensory inputs to interact with the environment.',
image: 'robotics.png'
},
{
title: 'Sensors',
content: 'We devise novel sensors together with AI models to enable brand-new perception applications.',
image: 'sensors.png'
},
],
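// Illustrative sketch only (hypothetical, not part of the site code): each entry above
// follows a plain { title, content, image } schema, so a consumer could iterate over the
// list roughly like this; the real rendering lives in the Vue components.
//
//   main.overview_of_research_directions.forEach(({ title, content, image }) => {
//     // build one card per research direction (renderCard is a hypothetical helper)
//     renderCard({ heading: title, body: content, img: image });
//   });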
about_our_leader: [
'Dr. Hang Zhao (赵行) is an Assistant Professor at IIIS, Tsinghua University. His research interests include multi-modal machine learning, robotics and autonomous driving.',
'Dr. Zhao was previously a Research Scientist at Waymo (formerly known as Google\'s self-driving car project). Before that, he received his Ph.D. degree from MIT under the supervision of Professor Antonio Torralba.'
],
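// Member entries below (graduate_students, research_assistants, alumni) share the
// { name, website, content, image } schema; website may be an empty string.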
graduate_students: [
{
name: 'Chenzhuang Du 杜晨壮',
website: 'https://scholar.google.com/citations?user=VoF-UAEAAAAJ&hl=en',
content: 'Chenzhuang Du is a PhD student starting from 2020. He is interested in applications of foundation models and multi-modal learning.',
image: 'chenzhuang.jpeg',
},
{
name: 'Junru Gu 辜俊儒',
website: 'https://github.com/GentleSmile',
content: 'Junru Gu is a PhD student starting from 2021. He is interested in autonomous driving.',
image: 'gujunru.png',
},
{
name: 'Chenxu Hu 胡晨旭',
website: 'https://huchenxucs.github.io/',
content: 'Chenxu Hu is a PhD student starting from 2021. He is interested in multi-modal learning, across vision, language and audio.',
image: 'chenxu.jpeg',
},
{
name: 'Tianyuan Yuan 袁天远',
website: '',
content: 'Tianyuan Yuan is a PhD student starting from 2022. He is interested in computer vision and autonomous driving.',
image: 'yuantianyuan.jpg',
},
{
name: 'Xiaoyu Tian 田晓宇',
website: '',
content: 'Xiaoyu Tian is a PhD student starting from 2023. He is interested in computer vision and autonomous driving.',
image: 'kenan.jpeg',
},
{
name: 'Yicheng Liu 刘毅成',
website: 'https://scholar.google.com/citations?user=vRmsgQUAAAAJ&hl',
content: 'Yicheng Liu is a PhD student starting from 2023. He is interested in autonomous driving and representation learning.',
image: 'yicheng.jpg',
},
{
name: 'Leo Zhuang 庄子文',
website: 'https://ziwenzhuang.github.io',
content: 'Leo Ziwen Zhuang is a PhD student starting from 2024. He is interested in robot learning.',
image: 'ziwen.jpg',
},
{
name: 'Shaoting Zhu 朱少廷',
website: '',
content: 'Shaoting Zhu is a PhD student starting from 2024. He is interested in robot learning.',
image: 'kenan.jpeg',
},
{
name: 'Baijun Ye 叶柏均',
website: '',
content: 'Baijun Ye is a PhD student starting from 2024. He is interested in neural rendering.',
image: 'kenan.jpeg',
},
],
research_assistants: [
{
name: 'Qiao Sun 孙桥',
website: 'https://qiaosun.me/',
content: 'Qiao Sun is a researcher in the lab. He is interested in planning and simulation for autonomous driving.',
image: 'qiaosun.png',
},
{
name: 'Derun Li 李德润',
website: '',
content: 'Derun Li is a research assistant in the lab. He is also a PhD student at Shanghai Jiaotong University.',
image: 'kenan.jpeg',
},
{
name: 'Anna Min 闵安娜',
website: '',
content: 'Anna Min is a research assistant in the lab. She is interested in multi-modal learning.',
image: 'xiaolan.png',
},
{
name: 'Zhuoguang Chen 陈卓光',
website: '',
content: 'Zhuoguang Chen is a research assistant in the lab. He is interested in 3D perception.',
image: 'kenan.jpeg',
},
{
name: 'Moonjun Gong 吴国锋',
website: '',
content: 'Moonjun Gong is a research assistant in the lab. He is interested in 3D perception.',
image: 'kenan.jpeg',
},
{
name: 'Minghui Qin 秦明辉',
website: '',
content: 'Minghui Qin is a research assistant in the lab. He is interested in embodied agents.',
image: 'kenan.jpeg',
},
{
name: 'Ziyu Xiao 肖子昱',
website: '',
content: 'Ziyu Xiao is a research assistant in the lab. He is interested in neural rendering.',
image: 'kenan.jpeg',
},
{
name: 'Chuanhao Yan 颜川皓',
website: '',
content: 'Chuanhao Yan is an incoming PhD student in the lab. He is interested in generative models.',
image: 'kenan.jpeg',
},
{
name: 'Kechen Fang 方科晨',
website: '',
content: 'Kechen Fang is an incoming PhD student in the lab. He is interested in generative models.',
image: 'kenan.jpeg',
},
],
alumni: [
{
name: 'Wenxiao Wang 汪文潇',
website: 'https://wangwenxiao.github.io/',
content: 'Wenxiao Wang was an MS student from 2020 to 2021. He is now a PhD student at the University of Maryland.',
image: 'wenxiao.jpeg',
},
{
name: 'Renhao Wang 汪仁皓',
website: 'https://scholar.google.com/citations?user=q4RlE2oAAAAJ&hl=en',
content: 'Renhao Wang was a research assistant from 2021 to 2022. He is now a PhD student at UC Berkeley.',
image: 'kenan.jpeg',
},
{
name: 'Zihui Xue 薛子慧',
website: 'https://zihuixue.github.io/',
content: 'Zihui Xue was a research assistant from 2020 to 2021. She is now a PhD student at UT Austin.',
image: 'zihui.jpeg',
},
{
name: 'Tianyu Hua 华天羽',
website: 'https://patrickhua.github.io/',
content: 'Tianyu Hua was a research assistant from 2020 to 2021. He is an incoming PhD student at Stanford University.',
image: 'tianyu.jpg',
},
{
name: 'Tingle Li 黎庭乐',
website: 'https://tinglok.netlify.app/',
content: 'Tingle Li was a research assistant from 2021 to 2022. He is now a PhD student at UC Berkeley.',
image: 'tingle.jpg',
},
{
name: 'Tianyuan Zhang 张天远',
website: 'http://tianyuanzhang.com/',
content: 'Tianyuan Zhang was a research assistant in 2021. He is an incoming PhD student at MIT.',
image: 'tianyuan.jpg',
},
{
name: 'Zhengqi Gao 高正祺',
website: 'https://zhengqigao.github.io/',
content: 'Zhengqi Gao was an intern in 2021. He is now a PhD student at MIT.',
image: 'zhengqi.png',
},
{
name: 'Qi Li 李祁',
website: 'https://liqi17thu.github.io/',
content: 'Qi Li was an undergraduate intern in 2021. He is now a PhD student at UCLA.',
image: 'qili.jpeg',
},
{
name: 'Bowen Li 李博文',
website: 'https://vision4robotics.github.io/authors/bowen-li/',
content: 'Bowen Li was an undergraduate intern in 2022. He is now a PhD student at CMU.',
image: 'bowenli.jpeg',
},
{
name: 'Sucheng Ren 任苏成',
website: 'https://oliverrensu.github.io/',
content: 'Sucheng was an intern in 2021. He is now at MSRA.',
image: 'sucheng.jpeg',
},
{
name: 'Xuanyao Chen 陈炫耀',
website: '',
content: 'Xuanyao Chen was an intern in the lab from 2021 to 2022. He is an incoming PhD student at IIIS, Tsinghua University.',
image: 'kenan.jpeg',
},
{
name: 'Running Zhao 赵闰宁',
website: '',
content: 'Running Zhao was a visiting PhD student from the University of Hong Kong.',
image: 'kenan.jpeg',
},
{
name: 'Lingyu Zhu 朱玲玉',
website: 'https://ly-zhu.github.io/',
content: 'Lingyu Zhu was a visiting student in 2021 Fall, from Tampere University, Finland.',
image: 'lingyuzhu.png',
},
{
name: 'Ziyuan Huang 黄子渊',
website: 'https://huang-ziyuan.github.io/',
content: 'Ziyuan Huang was a visiting student in 2022, from National University of Singapore.',
image: 'ziyuan.jpeg',
},
{
name: 'Siting Li 李思婷',
website: '',
content: 'Siting Li was an undergraduate intern from 2021 to 2023. She is now a PhD student at the University of Washington.',
image: 'kenan.jpeg',
},
{
name: 'Zitian Tang 唐梓天',
website: 'https://zitiantang.github.io/',
content: 'Zitian Tang was an undergraduate intern from 2021 to 2023. He is now a PhD student at Brown University.',
image: 'zitian.jpg',
},
{
name: 'Wenjie Ye 叶闻捷',
website: '',
content: 'Wenjie Ye was an undergraduate intern from 2021 to 2023. He is now a researcher at Shanghai Qi Zhi Institute.',
image: 'kenan.jpeg',
},
{
name: 'Luca Yu 于江涛',
website: 'https://lucayu.me',
content: 'Luca Jiangtao Yu was an intern in the lab. He is now a PhD student at the University of Hong Kong.',
image: 'lucayu.jpg',
},
{
name: 'John Zheng 郑亮涛',
website: 'https://zltjohn.github.io/',
content: 'John Zheng was an intern in the lab. He is now an MS student at UCSD.',
image: 'johnzheng.png',
},
{
name: 'Jiageng Mao 毛佳庚',
website: '',
content: 'Jiageng Mao was an intern in the lab. He is now a PhD student at the University of Southern California.',
image: 'kenan.jpeg',
},
{
name: 'Cindy Zeng 曾思齐',
website: '',
content: 'Cindy Zeng was an intern in the lab. She is now a PhD student at the University of Illinois Urbana-Champaign.',
image: 'xiaolan.png',
},
{
name: 'Shiduo Zhang 张世铎',
website: '',
content: 'Shiduo Zhang was an intern in the lab. He is now at Fudan University.',
image: 'kenan.jpeg',
},
{
name: 'Xuan Xiong 熊璇',
website: '',
content: 'Xuan Xiong was a research assistant in the lab.',
image: 'xiaolan.png',
},
{
name: 'Cynthia Yu 俞秉宏',
website: '',
content: 'Cynthia Yu was a research assistant in the lab. She is now at NetEase.',
image: 'xiaolan.png',
},
{
name: 'Zhijie Huang 黄治杰',
website: '',
content: 'Zhijie Huang was a research assistant in the lab. He is now a PhD student at the University of Tokyo.',
image: 'kenan.jpeg',
},
{
name: 'Zenan Li 李泽楠',
website: '',
content: 'Zenan Li was a research assistant in the lab.',
image: 'kenan.jpeg',
},
{
name: 'Tao Jiang 江涛',
website: '',
content: 'Tao Jiang was a research assistant in the lab. He is now at GalaXea AI.',
image: 'jiangtao.jpeg',
},
{
name: 'Simian Luo 骆思勉',
website: 'https://luosiallen.github.io',
content: 'Simian Luo was an MS student in the lab. He is now starting his own company.',
image: 'lsm.jpg',
},
],
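// Project entries use: title, publisher, authors, paper_link and image, plus optional
// description, project_link, code_link, extra_link (an array of { name, link }) and
// tags (e.g. 'hot').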
projects: [
{
title: 'PreSight: Enhancing Autonomous Vehicle Perception with City-Scale NeRF Priors',
publisher: 'ECCV 2024',
authors: 'Tianyuan Yuan, Yucheng Mao, Jiawei Yang, Yicheng Liu, Yue Wang, Hang Zhao',
description: '"A novel framework leverages past traversals to construct static prior, enhancing online perception in later navigations."',
paper_link: 'https://arxiv.org/abs/2403.09079',
project_link: '',
image: 'PreSight.png',
extra_link: [
{
name: 'Video',
link: 'https://www.youtube.com/watch?v=sC3WaLIPCUc',
},
{
name: 'Code',
link: 'https://github.com/yuantianyuan01/PreSight',
},
],
},
{
title: 'Humanoid Parkour Learning',
publisher: 'CoRL 2024',
authors: 'Ziwen Zhuang, Shenzhe Yao, Hang Zhao',
description: '"The first humanoid robot that learns to parkour!"',
paper_link: 'https://arxiv.org/abs/2406.10759',
project_link: 'https://humanoid4parkour.github.io/',
image: 'human_parkour.png',
extra_link: [],
},
{
title: 'DriveVLM: The Convergence of Autonomous Driving and Large Vision-Language Models',
publisher: 'CoRL 2024',
authors: 'Xiaoyu Tian*, Junru Gu*, Bailin Li*, Yicheng Liu*, Yang Wang, Zhiyong Zhao, Kun Zhan, Peng Jia, XianPeng Lang, Hang Zhao',
description: '"A dual system (fast+slow thinking) that leverages large vision-language models for autonomous driving"',
paper_link: 'https://arxiv.org/abs/2402.12289',
project_link: 'https://tsinghua-mars-lab.github.io/DriveVLM/',
image: 'drivevlm.png',
extra_link: [],
},
{
title: 'Latent Consistency Models: Synthesizing High-Resolution Images With Few-Step Inference',
publisher: 'arXiv Preprint',
authors: 'Simian Luo*, Yiqin Tan*, Longbo Huang†, Jian Li†, Hang Zhao†',
description: '"LCMs: The next generation of generative models after Latent Diffusion Models (LDMs)."',
paper_link: 'https://arxiv.org/abs/2310.04378',
project_link: 'https://latent-consistency-models.github.io',
image: 'lcm.png',
extra_link: [
{
name: 'Demo',
link: 'https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model',
},
{
name: 'Code',
link: 'https://github.com/luosiallen/latent-consistency-model',
},
{
name: 'LCM-LoRA Paper',
link: 'https://arxiv.org/abs/2311.05556',
},
],
},
{
title: 'Diff-Foley: Synchronized Video-to-Audio Synthesis with Latent Diffusion Models',
publisher: 'NeurIPS 2023',
authors: 'Simian Luo, Chuanhao Yan, Chenxu Hu, Hang Zhao†',
description: 'A Highly Synchronized Video-to-Audio Generative Model (Neural Foley)',
paper_link: 'https://arxiv.org/abs/2306.17203v1',
project_link: 'https://diff-foley.github.io',
image: 'diff-foley.png',
extra_link: [
{
name: 'Code',
link: 'https://github.com/luosiallen/Diff-Foley',
},
],
},
{
title: 'Robot Parkour Learning',
publisher: 'CoRL 2023 Best System Paper Finalist (Top 3)',
authors: 'Ziwen Zhuang*, Zipeng Fu*, Jianren Wang, Christopher G Atkeson, Sören Schwertfeger, Chelsea Finn, Hang Zhao',
description: 'Robot parkour skills empowered by onboard vision and a neural network!',
paper_link: 'https://robot-parkour.github.io/resources/Robot_Parkour_Learning.pdf',
project_link: 'https://robot-parkour.github.io/',
image: 'parkour.jpeg',
extra_link: [
{
name: 'Code',
link: 'https://github.com/ZiwenZhuang/parkour',
},
],
},
{
title: 'Radio2Text: Streaming Speech Recognition Using mmWave Radio Signals',
publisher: 'Ubicomp/ISWC 2023',
authors: 'Running Zhao*, Jiangtao Yu*, Hang Zhao†, Edith C.H. Ngai†',
description: '"The first mmWave-based system for streaming ASR with a vocabulary size exceeding 13k words."',
paper_link: 'https://arxiv.org/abs/2308.08125',
image: 'radio2text.png',
},
{
title: 'ChatDB: Augmenting LLMs with Databases as Their Symbolic Memory',
publisher: 'LLM@IJCAI 2023',
authors: 'Chenxu Hu*, Jie Fu*†, Chenzhuang Du, Simian Luo, Junbo Zhao, Hang Zhao†',
description: '"Augmenting LLMs with symbolic memory for accurate memory and complex reasoning."',
paper_link: 'https://arxiv.org/abs/2306.03901',
project_link: 'https://chatdatabase.github.io/',
image: 'chatdb.png',
extra_link: [
{
name: 'Code',
link: 'https://github.com/huchenxucs/ChatDB',
},
],
},
{
title: 'Occ3D: A Large-Scale 3D Occupancy Prediction Benchmark for Autonomous Driving',
publisher: 'NeurIPS Dataset Track 2023',
authors: 'Xiaoyu Tian*, Tao Jiang*, Longfei Yun, Yucheng Mao, Huitong Yang, Yue Wang, Yilun Wang, Hang Zhao',
// description: '""',
paper_link: 'https://arxiv.org/abs/2304.14365',
project_link: 'https://tsinghua-mars-lab.github.io/Occ3D/',
image: 'Occ3D.png',
extra_link: [
{
name: 'Dataset',
link: 'https://github.com/Tsinghua-MARS-Lab/Occ3D',
},
],
},
// {
// title: 'SSCBench: A Large-Scale 3D Semantic Scene Completion Benchmark for Autonomous Driving',
// publisher: 'Preprint',
// authors: 'Yiming Li*, Sihang Li*, Xinhao Liu*, Moonjun Gong*, Kenan Li, Nuo Chen, Zijun Wang, Zhiheng Li, Tao Jiang, Fisher Yu, Yue Wang, Hang Zhao, Zhiding Yu, Chen Feng',
// // description: '""',
// paper_link: 'https://arxiv.org/abs/2306.09001',
// project_link: 'https://github.com/ai4ce/SSCBench',
// image: 'sscbench.png',
// extra_link: [
// {
// name: 'Dataset',
// link: 'https://github.com/ai4ce/SSCBench/tree/main/dataset',
// },
// ],
// },
{
title: 'VectorMapNet: End-to-end Vectorized HD Map Learning',
publisher: 'ICML 2023',
authors: 'Yicheng Liu, Tianyuan Yuan, Yue Wang, Yilun Wang, Hang Zhao',
description: '"An end-to-end vectorized HD map learning framework for autonomous driving!"',
paper_link: 'https://arxiv.org/abs/2206.08920',
project_link: 'https://tsinghua-mars-lab.github.io/vectormapnet/',
image: 'vectormapnet.png',
},
{
title: 'What Happened 3 Seconds Ago? Inferring the Past with Thermal Imaging',
publisher: 'CVPR 2023',
authors: 'Zitian Tang, Wenjie Ye, Wei-Chiu Ma, Hang Zhao',
description: '"Using thermal cues to estimate the past human motions."',
paper_link: 'https://arxiv.org/abs/2304.13651',
extra_link: [
{
name: 'Dataset',
link: 'https://github.com/ZitianTang/Thermal-IM',
},
],
image: 'ThermalIM.png',
},
{
title: 'Neural Map Prior for Autonomous Driving',
publisher: 'CVPR 2023',
authors: 'Xuan Xiong, Yicheng Liu, Tianyuan Yuan, Yue Wang, Yilun Wang, Hang Zhao',
description: '"A neural representation of HD maps to improve local map inference performance for autonomous driving!"',
paper_link: 'https://arxiv.org/abs/2304.08481',
project_link: 'https://tsinghua-mars-lab.github.io/neural_map_prior/',
image: 'neuralmapprior.png',
},
{
title: 'ViP3D: End-to-end Visual Trajectory Prediction via 3D Agent Queries',
publisher: 'CVPR 2023',
authors: 'Junru Gu*, Chenxu Hu*, Tianyuan Zhang, Xuanyao Chen, Yilun Wang, Yue Wang, Hang Zhao',
description: '"A vision-based approach to trajectory prediction for autonomous driving!"',
paper_link: 'https://arxiv.org/abs/2208.01582',
project_link: 'https://tsinghua-mars-lab.github.io/ViP3D/',
image: 'vip3d.gif',
},
{
title: 'InterSim: Interactive Traffic Simulation via Explicit Relation Modeling',
publisher: 'IROS 2022',
authors: 'Qiao Sun, Xin Huang, Brian C Williams, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2210.14413',
project_link: 'https://tsinghua-mars-lab.github.io/InterSim/',
image: 'intersim.png',
},
{
title: 'Radio2Speech: High Quality Speech Recovery from Radio Frequency Signals',
publisher: 'Interspeech 2022',
authors: 'Running Zhao, Jiangtao Yu, Tingle Li, Hang Zhao*, Edith C.H. Ngai*',
description: '"High-quality speech recovery system for millimeter-wave radar without deafness!"',
paper_link: 'https://arxiv.org/abs/2206.11066',
project_link: 'https://zhaorunning.github.io/Radio2Speech/',
image: 'radio2speech.jpg',
},
{
title: 'M2I: From Factored Marginal Trajectory Prediction to Interactive Prediction',
publisher: 'CVPR 2022',
authors: 'Qiao Sun*, Xin Huang*, Junru Gu, Brian C. Williams, Hang Zhao',
description: '"Relationship predictions boost your motion prediction models for interactive predictions!"',
paper_link: 'https://arxiv.org/abs/2202.11884',
project_link: 'https://tsinghua-mars-lab.github.io/M2I/',
image: 'M2I.gif',
},
{
title: 'FUTR3D: A Unified Sensor Fusion Framework for 3D Detection',
publisher: 'CVPRW 2023',
authors: 'Xuanyao Chen, Tianyuan Zhang, Yue Wang, Yilun Wang, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2203.10642',
project_link: 'https://tsinghua-mars-lab.github.io/futr3d/',
image: 'futr3d.png',
},
// {
// title: 'MUTR3D: A Multi-camera Tracking Framework via 3D-to-2D Queries',
// publisher: 'CVPR Workshop on Autonomous Driving 2022',
// authors: 'Tianyuan Zhang, Xuanyao Chen, Yue Wang, Yilun Wang, Hang Zhao',
// paper_link: 'https://arxiv.org/abs/2205.00613',
// project_link: 'https://tsinghua-mars-lab.github.io/mutr3d/',
// image: 'mutr3d.png',
// },
// {
// title: 'SEMI: Self-supervised Exploration via Multisensory Incongruity',
// publisher: 'ICRA 2022',
// authors: 'Jianren Wang*, Ziwen Zhuang*, Hang Zhao',
// description: '"Multi-sensory incongruity incentizes RL agents to explore!"',
// paper_link: 'https://arxiv.org/abs/2009.12494',
// project_link: '',
// image: 'SEMI.jpeg',
// },
{
title: 'HDMapNet: An Online HD Map Construction and Evaluation Framework',
publisher: 'ICRA 2022, CVPR 2021 Workshop best paper nominee',
authors: 'Qi Li, Yue Wang, Yilun Wang, Hang Zhao',
description: '"HD map sensing from onboard sensors!"',
paper_link: 'https://arxiv.org/abs/2107.06307',
project_link: 'https://tsinghua-mars-lab.github.io/HDMapNet/',
image: 'hdmapnet_demo.gif',
tags: [
'hot',
],
},
{
title: 'Neural Dubber: Dubbing for Videos According to Scripts',
publisher: 'NeurIPS 2021',
authors: 'Chenxu Hu, Qiao Tian, Tingle Li, Yuping Wang, Yuxuan Wang, Hang Zhao',
description: '"First achieve automatic video dubbing computationally by neural network!"',
paper_link: 'https://arxiv.org/abs/2110.08243',
project_link: 'https://tsinghua-mars-lab.github.io/NeuralDubber/',
image: 'neural_dubber.png',
},
{
title: 'What Makes Multi-Modal Learning Better than Single (Provably)',
publisher: 'NeurIPS 2021',
authors: 'Yu Huang, Chenzhuang Du, Zihui Xue, Xuanyao Chen, Hang Zhao, Longbo Huang',
paper_link: 'https://proceedings.neurips.cc/paper/2021/hash/5aa3405a3f865c10f420a4a7b55cbff3-Abstract.html',
image: 'mm_better.png',
},
{
title: 'DETR3D: 3D Object Detection from Multi-view Images via 3D-to-2D Queries',
publisher: 'CoRL 2021',
authors: 'Yue Wang, Vitor Campagnolo Guizilini, Tianyuan Zhang, Yilun Wang, Hang Zhao, Justin Solomon',
description: '"A new paradigm of 3D object detection from 2D images!"',
paper_link: 'https://openreview.net/pdf?id=xHnJS2GYFDz',
project_link: '',
image: 'detr3d.jpeg',
tags: [
'hot',
],
},
{
title: 'On Feature Decorrelation in Self-Supervised Learning',
publisher: 'ICCV 2021 Oral',
authors: 'Tianyu Hua, Wenxiao Wang, Zihui Xue, Yue Wang, Sucheng Ren, Hang Zhao',
description: '"It reveals the connection between model collapse and feature correlations!"',
paper_link: 'https://openaccess.thecvf.com/content/ICCV2021/html/Hua_On_Feature_Decorrelation_in_Self-Supervised_Learning_ICCV_2021_paper.html',
project_link: 'https://tsinghua-mars-lab.github.io/decorr/',
image: 'decorrelation.jpg',
tags: [
'hot',
],
},
{
title: 'Large Scale Interactive Motion Forecasting for Autonomous Driving: The Waymo Open Motion Dataset',
publisher: 'ICCV 2021 Oral',
authors: 'Scott Ettinger, Shuyang Cheng, Benjamin Caine, Chenxi Liu, Hang Zhao, Sabeek Pradhan, Yuning Chai, Ben Sapp, Charles Qi, Yin Zhou, Zoey Yang, Aurelien Chouard, Pei Sun, Jiquan Ngiam, Vijay Vasudevan, Alexander McCauley, Jonathon Shlens, Dragomir Anguelov',
// description: '"A high-quality interactive motion prediction dataset for autonomous driving!"',
paper_link: 'https://openaccess.thecvf.com/content/ICCV2021/html/Ettinger_Large_Scale_Interactive_Motion_Forecasting_for_Autonomous_Driving_The_Waymo_ICCV_2021_paper.html',
project_link: 'https://blog.waymo.com/2021/03/expanding-waymo-open-dataset-with-interactive-scenario-data-and-new-challenges.html',
image: 'waymo_motion.gif',
},
{
title: 'Multimodal Knowledge Expansion',
publisher: 'ICCV 2021',
authors: 'Zihui Xue, Sucheng Ren, Zhengqi Gao, Hang Zhao',
description: '"Multimodal data brings knowledge for free!"',
paper_link: 'https://openaccess.thecvf.com/content/ICCV2021/html/Xue_Multimodal_Knowledge_Expansion_ICCV_2021_paper.html',
project_link: 'https://tsinghua-mars-lab.github.io/MKE/',
image: 'mke.png',
},
{
title: 'DenseTNT: End-to-end Trajectory Prediction from Dense Goal Sets',
publisher: 'ICCV 2021, Waymo Motion Prediction Challenge Winner',
authors: 'Junru Gu, Chen Sun, Hang Zhao',
description: '"A SOTA anchor-free and end-to-end multi-trajectory prediction model"',
paper_link: 'https://openaccess.thecvf.com/content/ICCV2021/html/Gu_DenseTNT_End-to-End_Trajectory_Prediction_From_Dense_Goal_Sets_ICCV_2021_paper.html',
project_link: 'https://tsinghua-mars-lab.github.io/DenseTNT/',
extra_link: [
{
name: 'Waymo Challenge Report',
link: 'https://arxiv.org/abs/2106.14160',
},
],
image: 'densetnt.png',
},
// {
// title: 'CVC: Contrastive Learning for Non-parallel Voice Conversion',
// publisher: 'Interspeech 2021 (ISCA travel grant award)',
// authors: 'Tingle Li, Yichen Liu, Chenxu Hu, Hang Zhao',
// description: '"It only requires one-way GAN training for non-parallel voice conversion"',
// paper_link: 'https://www.isca-speech.org/archive/interspeech_2021/li21d_interspeech.html',
// project_link: 'https://tinglok.netlify.app/files/cvc/',
// image: 'CVC.jpg',
// },
// {
// title: 'HDMapGen: A Hierarchical Graph Generative Model of High Definition Maps',
// publisher: 'CVPR 2021',
// authors: 'Lu Mi, Hang Zhao, Charlie Nash, Xiaohan Jin, Jiyang Gao, Chen Sun, Cordelia Schmid, Nir Shavit, Yuning Chai, Dragomir Anguelov',
// paper_link: 'https://openaccess.thecvf.com/content/CVPR2021/html/Mi_HDMapGen_A_Hierarchical_Graph_Generative_Model_of_High_Definition_Maps_CVPR_2021_paper.html',
// image: 'HDMapGen.jpg',
// },
{
title: 'TNT: Target-driveN Trajectory Prediction',
publisher: 'CoRL 2020',
authors: 'Hang Zhao*, Jiyang Gao*, Tian Lan, Chen Sun, Benjamin Sapp, Balakrishnan Varadarajan, Yue Shen, Yi Shen, Yuning Chai, Cordelia Schmid, Congcong Li, Dragomir Anguelov',
description: '"A new learning-based framework for multi-trajectory prediction!"',
paper_link: 'https://arxiv.org/abs/2008.08294',
project_link: '',
image: 'tnt.png',
tags: [
'hot',
],
},
{
title: 'VectorNet: Encoding HD Maps and Agent Dynamics from Vectorized Representation',
publisher: 'CVPR 2020',
authors: 'Jiyang Gao, Chen Sun, Hang Zhao, Yi Shen, Dragomir Anguelov, Congcong Li, Cordelia Schmid',
description: '"A graphical representation of HD maps."',
paper_link: 'https://openaccess.thecvf.com/content_CVPR_2020/papers/Gao_VectorNet_Encoding_HD_Maps_and_Agent_Dynamics_From_Vectorized_Representation_CVPR_2020_paper.pdf',
project_link: 'https://blog.waymo.com/2020/05/vectornet.html',
image: 'vectornet.gif',
tags: [
'hot',
],
},
{
title: 'Scalability in Perception for Autonomous Driving: Waymo Open Dataset',
publisher: 'CVPR 2020',
authors: 'Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, Vijay Vasudevan, Wei Han, Jiquan Ngiam, Hang Zhao, Aleksei Timofeev, Scott Ettinger, Maxim Krivokon, Amy Gao, Aditya Joshi, Sheng Zhao, Shuyang Cheng, Yu Zhang, Jonathon Shlens, Zhifeng Chen, Dragomir Anguelov',
// description: '"One of the largest and most diverse autonomous driving datasets ever released!"',
paper_link: 'https://arxiv.org/abs/1912.04838',
project_link: 'https://waymo.com/open/',
image: 'waymo_od_challenge.jpeg',
tags: [
'hot',
],
},
// {
// title: 'The Sound of Motions',
// publisher: 'ICCV 2019',
// authors: 'Hang Zhao, Chuang Gan, Wei-Chiu Ma, Antonio Torralba',
// description: '"Listen to the sound of motions!"',
// paper_link: 'https://arxiv.org/abs/1904.05979',
// project_link: '',
// image: 'sound_of_motions.png',
// },
// {
// title: 'Self-supervised Moving Vehicle Tracking with Stereo Sound',
// publisher: 'ICCV 2019',
// authors: 'Chuang Gan, Hang Zhao, Peihao Chen, David Cox, Antonio Torralba',
// description: '',
// paper_link: 'http://openaccess.thecvf.com/content_ICCV_2019/html/Gan_Self-Supervised_Moving_Vehicle_Tracking_With_Stereo_Sound_ICCV_2019_paper.html',
// project_link: '',
// image: 'stereo_tracking.jpeg',
// },
// {
// title: 'Through-Wall Human Mesh Recovery Using Radio Signals',
// publisher: 'ICCV 2019',
// authors: 'Mingmin Zhao, Yingcheng Liu, Aniruddh Raghu, Hang Zhao, Tianhong Li, Antonio Torralba, Dina Katabi',
// description: '',
// paper_link: 'http://openaccess.thecvf.com/content_ICCV_2019/html/Zhao_Through-Wall_Human_Mesh_Recovery_Using_Radio_Signals_ICCV_2019_paper.html',
// project_link: '',
// image: 'rfavatar.gif',
// },
{
title: 'HACS: Human Action Clips and Segments Dataset for Recognition and Temporal Localization',
publisher: 'ICCV 2019',
authors: 'Hang Zhao, Zhicheng Yan, Lorenzo Torresani, Antonio Torralba',
description: '"A large-scale dataset for temporal action localization and recognition."',
paper_link: 'https://arxiv.org/abs/1712.09374',
project_link: 'http://hacs.csail.mit.edu/',
image: 'slac.jpeg',
},
{
title: 'The Sound of Pixels',
publisher: 'ECCV 2018',
authors: 'Hang Zhao, Chuang Gan, Andrew Rouditchenko, Carl Vondrick, Josh McDermott, Antonio Torralba',
description: '"Listen to the sound of pixels!"',
paper_link: 'https://arxiv.org/abs/1804.03160',
project_link: 'http://sound-of-pixels.csail.mit.edu/',
image: 'sound_of_pixels.png',
tags: [
'hot',
],
},
// {
// title: 'RF-Based 3D Skeletons',
// publisher: 'SIGCOMM 2018',
// authors: 'Mingmin Zhao, Yonglong Tian, Hang Zhao, Mohammad Alsheikh, Tianhong Li, Rumen Hristov, Zachary Kabelac, Dina Katabi, Antonio Torralba',
// description: '',
// paper_link: 'https://dl.acm.org/citation.cfm?id=3230579',
// project_link: 'http://news.mit.edu/2018/artificial-intelligence-senses-people-through-walls-0612',
// image: 'rf-pose-3d.png',
// },
{
title: 'Through-Wall Human Pose Estimation Using Radio Signals',
publisher: 'CVPR 2018',
authors: 'Mingmin Zhao, Tianhong Li, Mohammad Alsheikh, Yonglong Tian, Hang Zhao, Antonio Torralba, Dina Katabi',
description: 'Superman vision: Seeing human pose through the wall!',
paper_link: 'https://openaccess.thecvf.com/content_cvpr_2018/papers/Zhao_Through-Wall_Human_Pose_CVPR_2018_paper.pdf',
project_link: 'http://rfpose.csail.mit.edu/',
image: 'rf-pose.png',
tags: [
'hot',
],
},
// {
// title: 'Open Vocabulary Scene Parsing',
// publisher: 'ICCV 2017',
// authors: 'Hang Zhao, Xavier Puig, Bolei Zhou, Sanja Fidler, Antonio Torralba',
// description: '',
// paper_link: 'http://openaccess.thecvf.com/content_ICCV_2017/papers/Zhao_Open_Vocabulary_Scene_ICCV_2017_paper.pdf',
// project_link: 'http://sceneparsing.csail.mit.edu/openvoc/',
// image: 'openvoc.png',
// },
{
title: 'Scene Parsing through ADE20K Dataset',
publisher: 'CVPR 2017',
authors: 'Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, Antonio Torralba',
description: '"The most widely used scene parsing dataset."',
paper_link: 'http://people.csail.mit.edu/bzhou/publication/scene-parse-camera-ready.pdf',
code_link: 'https://github.com/CSAILVision/semantic-segmentation-pytorch',
extra_link: [
{
name: 'Paper (IJCV 2018)',
link: 'https://link.springer.com/article/10.1007/s11263-018-1140-0',
},
{
name: 'Full Dataset',
link: 'http://groups.csail.mit.edu/vision/datasets/ADE20K/',
},
{
name: 'MIT Scene Parsing Benchmark',
link: 'http://sceneparsing.csail.mit.edu/',
},
],
image: 'sceneparsing_cvpr2017.png',
tags: [
'hot',
],
},
{
title: 'Loss Functions for Neural Networks for Image Processing',
publisher: 'IEEE TCI 2017',
authors: 'Hang Zhao, Orazio Gallo, Iuri Frosio and Jan Kautz',
description: '',
paper_link: 'http://ieeexplore.ieee.org/iel7/6745852/6960042/07797130.pdf',
project_link: 'http://research.nvidia.com/publication/loss-functions-image-restoration-neural-networks',
image: 'lossNN.png',
},
{
title: 'Duckietown: an Open, Inexpensive and Flexible Platform for Autonomy Education and Research',
publisher: 'ICRA 2017',
authors: '',
description: '"We are building an open-source education and research platform for autonomous driving."',
paper_link: 'http://people.csail.mit.edu/hangzhao/papers/duckietown.pdf',
project_link: 'https://duckietown.mit.edu/',
image: 'duckietown.png',
},
{
title: 'Unbounded High Dynamic Range Photography using a Modulo Camera',
publisher: 'ICCP 2015 Best Paper Runner-up',
authors: 'Hang Zhao, Boxin Shi, Christy Fernandez-Cull, Sai-Kit Yeung and Ramesh Raskar',
description: '',
paper_link: 'http://people.csail.mit.edu/hangzhao/papers/moduloUHDR.pdf',
project_link: 'http://web.media.mit.edu/~hangzhao/modulo.html',
image: 'moduloHDR.png',
tags: [
'hot',
],
},
// {
// title: '',
// publisher: '',
// authors: '',
// description: '',
// paper_link: '',
// project_link: '',
// image: '',
// },
],
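// Publication entries keep a lighter schema: title, publisher, authors and paper_link.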
publications: [
{
title: 'Humanoid Parkour Learning',
publisher: 'CoRL 2024',
authors: 'Ziwen Zhuang, Shenzhe Yao, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2406.10759',
},
{
title: 'DriveVLM: The Convergence of Autonomous Driving and Large Vision-Language Models',
publisher: 'CoRL 2024',
authors: 'Xiaoyu Tian*, Junru Gu*, Bailin Li*, Yicheng Liu*, Yang Wang, Zhiyong Zhao, Kun Zhan, Peng Jia, Xianpeng Lang, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2402.12289',
},
{
title: 'Uncertainty-Aware Decision Transformer for Stochastic Driving Environments',
publisher: 'CoRL 2024',
authors: 'Zenan Li, Fan Nie, Qiao Sun, Fang Da, Hang Zhao',
paper_link: 'https://openreview.net/pdf?id=LiwdXkMsDv',
},
{
title: 'LiDAR-Based 4D Occupancy Completion and Forecasting',
publisher: 'IROS 2024',
authors: 'Xinhao Liu, Moonjun Gong, Qi Fang, Haoyu Xie, Yiming Li, Hang Zhao, Chen Feng',
paper_link: 'https://arxiv.org/abs/2310.11239',
},
{
title: 'SSCBench: A Large-Scale 3D Semantic Scene Completion Benchmark for Autonomous Driving',
publisher: 'IROS 2024',
authors: 'Yiming Li*, Sihang Li*, Xinhao Liu*, Moonjun Gong*, Kenan Li, Nuo Chen, Zijun Wang, Zhiheng Li, Tao Jiang, Fisher Yu, Yue Wang, Hang Zhao, Zhiding Yu, Chen Feng',
paper_link: 'https://arxiv.org/abs/2306.09001',
},
{
title: 'PreSight: Enhancing Autonomous Vehicle Perception with City-Scale NeRF Priors',
publisher: 'ECCV 2024',
authors: 'Tianyuan Yuan, Yucheng Mao, Jiawei Yang, Yicheng Liu, Yue Wang, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2403.09079',
},
{
title: 'CVT-Occ: Cost Volume Temporal Fusion for 3D Occupancy Prediction',
publisher: 'ECCV 2024',
authors: 'Zhangchen Ye*, Tao Jiang*, Chenfeng Xu, Yiming Li, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2409.13430',
},
{
title: 'A Unit-based System and Dataset for Expressive Direct Speech-to-Speech Translation',
publisher: 'Interspeech 2024',
authors: 'Anna Min, Chenxu Hu, Yi Ren, Hang Zhao',
paper_link: '',
},
{
title: 'Boosting Offline Reinforcement Learning for Autonomous Driving with Hierarchical Latent Skills',
publisher: 'ICRA 2024',
authors: 'Zenan Li, Fan Nie, Qiao Sun, Fang Da, Hang Zhao',
paper_link: 'https://ieeexplore.ieee.org/abstract/document/10611197',
},
{
title: 'Latent Consistency Models: Synthesizing High-Resolution Images With Few-Step Inference',
publisher: 'Preprint',
authors: 'Simian Luo*, Yiqin Tan*, Longbo Huang†, Jian Li†, Hang Zhao†',
paper_link: 'https://arxiv.org/abs/2310.04378',
},
{
title: 'StreamMapNet: Streaming Mapping Network for Vectorized Online HD Map Construction',
publisher: 'WACV 2024',
authors: 'Tianyuan Yuan, Yicheng Liu, Yue Wang, Yilun Wang, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2308.12570',
},
{
title: 'Robot Parkour Learning',
publisher: 'CoRL 2023 Best System Paper Finalist (Top 3)',
authors: 'Ziwen Zhuang, Zipeng Fu, Jianren Wang, Christopher G Atkeson, Sören Schwertfeger, Chelsea Finn, Hang Zhao',
paper_link: 'https://robot-parkour.github.io/resources/Robot_Parkour_Learning.pdf',
},
{
title: 'Cross-dataset Sensor Alignment: Making Visual 3D Object Detector Generalize',
publisher: 'CoRL 2023',
authors: 'Liangtao Zheng, Yicheng Liu, Yue Wang, Hang Zhao',
paper_link: 'https://openreview.net/pdf?id=dIgCPoy8E3',
},
{
title: 'A Universal Semantic-Geometric Representation for Robotic Manipulation',
publisher: 'CoRL 2023',
authors: 'Tong Zhang, Yingdong Hu, Hanchen Cui, Hang Zhao, Yang Gao',
paper_link: 'https://openreview.net/pdf?id=AIgm8ZE_DlD',
},
{
title: 'Diff-Foley: Synchronized Video-to-Audio Synthesis with Latent Diffusion Models',
publisher: 'NeurIPS 2023',
authors: 'Simian Luo, Chuanhao Yan, Chenxu Hu, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2306.17203',
},
{
title: 'Occ3D: A Large-Scale 3D Occupancy Prediction Benchmark for Autonomous Driving',
publisher: 'NeurIPS 2023',
authors: 'Xiaoyu Tian*, Tao Jiang*, Longfei Yun, Yucheng Mao, Huitong Yang, Yue Wang, Yilun Wang, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2304.14365',
},
{
title: 'PVT++: A Simple End-to-End Latency-Aware Visual Tracking Framework',
publisher: 'ICCV 2023',
authors: 'Bowen Li, Ziyuan Huang, Junjie Ye, Yiming Li, Sebastian Scherer, Hang Zhao, Changhong Fu',
paper_link: 'https://openaccess.thecvf.com/content/ICCV2023/papers/Li_PVT_A_Simple_End-to-End_Latency-Aware_Visual_Tracking_Framework_ICCV_2023_paper.pdf',
},
{
title: 'INT2: Interactive Trajectory Prediction at Intersections',
publisher: 'ICCV 2023',
authors: 'Zhijie Yan, et al.',
paper_link: 'https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_INT2_Interactive_Trajectory_Prediction_at_Intersections_ICCV_2023_paper.pdf',
},
{
title: 'Radio2Text: Streaming Speech Recognition Using mmWave Radio Signals',
publisher: 'Ubicomp/ISWC 2023',
authors: 'Running Zhao*, Jiangtao Yu*, Hang Zhao†, Edith C.H. Ngai†',
paper_link: 'https://arxiv.org/abs/2308.08125',
},
{
title: 'ChatDB: Augmenting LLMs with Databases as Their Symbolic Memory',
publisher: 'LLM@IJCAI 2023',
authors: 'Chenxu Hu*, Jie Fu*†, Chenzhuang Du, Simian Luo, Junbo Zhao, Hang Zhao†',
paper_link: 'https://arxiv.org/abs/2306.03901',
},
{
title: 'P4P: Conflict-Aware Motion Prediction for Planning in Autonomous Driving',
publisher: 'IROS 2023',
authors: 'Qiao Sun, Xin Huang, Brian C. Williams, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2211.01634',
},
{
title: 'What Happened 3 Seconds Ago? Inferring the Past with Thermal Imaging',
publisher: 'CVPR 2023',
authors: 'Zitian Tang, Wenjie Ye, Wei-Chiu Ma, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2304.13651',
},
{
title: 'Neural Map Prior for Autonomous Driving',
publisher: 'CVPR 2023',
authors: 'Xuan Xiong, Yicheng Liu, Tianyuan Yuan, Yilun Wang, Yue Wang, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2304.08481',
},
{
title: 'ViP3D: End-to-end Visual Trajectory Prediction via 3D Agent Queries',
publisher: 'CVPR 2023',
authors: 'Junru Gu*, Chenxu Hu*, Tianyuan Zhang, Xuanyao Chen, Yilun Wang, Yue Wang, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2208.01582',
},
{
title: 'GeoMAE: Masked Geometric Target Prediction for Self-supervised Point Cloud Pre-Training',
publisher: 'CVPR 2023',
authors: 'Xiaoyu Tian, Haoxi Ran, Yue Wang, Hang Zhao',
paper_link: 'https://arxiv.org/abs/2305.08808',
},
{