<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<html>
<head>
<!-- Support Chinese text (UTF-8) -->
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="google-site-verification" content="vgiOidXHUdMydJehnk-bYoTeRFsRxVppHJKg2orKRaA" />
<title>Hao Dong - Peking University</title>
<style type="text/css">
body {
width: 1400px;
text-align: center;
font-family: "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Helvetica, Arial, "Lucida Grande", sans-serif;
font-weight: 300;
font-size: 16px;
background-color: #FFF;
}
hr {
border: 0;
height: 1px;
background-image: linear-gradient(to right, rgba(0, 0, 0, 0), rgba(0, 0, 0, 0.75), rgba(0, 0, 0, 0));
}
table {
padding: 5px;
}
/* Color name reference: https://www.color-hex.com/color-names.html */
/* https://www.htmlcsscolor.com/hex/3B3B3B */
table.pub_table,
td.pub_td1,
td.pub_td2 {
border-collapse: collapse;
border-bottom: 0px solid #9B9B9B;
padding-bottom: 10px;
padding-top: 10px;
padding-left: 10px;
width: 1100px;
/* width: 1250px; (hao) */
}
td.pub_td1 {
width: 100px;
}
.level_1 {
display: none;
}
.paper2{
display: none;
}
select{
width:150px;
height: 20px;
border: 0px;
outline:none;
color: #1367a7;
}
td.sub_heading {
color: #3B3B3B;
font-weight: 700;
font-size: 20px;
}
tr {
background-color: #FFF;
}
div#container {
margin-left: auto;
margin-right: auto;
width: 1200px;
text-align: left;
position: relative;
background-color: #FFF;
}
div#DocInfo {
color: #9B9B9B;
height: 128px;
}
h4,
h3,
h2,
h1 {
color: #3B3B3B;
}
h2 {
font-size: 130%;
}
p {
color: #000;
margin-bottom: 20px;
}
p.caption {
color: #9B9B9B;
text-align: left;
width: 600px;
font: 11px helvetica, sans-serif;
}
p.caption2 {
color: #9B9B9B;
text-align: left;
width: 800px;
font: 11px helvetica, sans-serif;
}
#header_img {
position: absolute;
top: 0px;
right: 0px;
}
ul { margin: 0px; } /* remove space on ul list */
a:link,
a:visited {
color: #1367a7;
/* blue */
/* alternative dark red: color: #990000; (990000, 8b2323) */
font-family: Tahoma, Geneva, sans-serif;
/* bold */
text-decoration: none;
}
.section_div {
background-color: #FFF;
padding: 10px 10px 10px 10px;
margin: 10px 10px 10px 10px;
/* border: 1px solid #AAA; */
}
.service3 {
display: none;
}
.service4 {
display: none;
}
.btn{
border: 0;
background: none;
cursor: pointer;
}
.btn_two{
margin-left: 9px;
}
.btn_one{
margin-left: 22px;
}
.btn>span{
color: #1367a7;
}
.all_Btn{
border: 0;
background: none;
cursor: pointer;
}
.all_Btn>span{
color: #1367a7;
}
.all_Btn > span:hover {
border-bottom:1px solid #1367a7;
}
.all_Btn > span.current {
border-bottom:1px solid #1367a7;
font-weight: 500;
}
body {
background-color: #FFF;
}
#personal_info {
background-color: #FFF;
}
img.teaser_img {
width: 256px;
display: block;
margin-left: auto;
margin-right: auto;
margin-top: 5px;
margin-bottom: 5px;
border: 0px solid black
}
img.photo_of_me {
border-radius: 20px;
}
div.teaser_img_div {
width: 286px;
}
/* hao: collapsible table */
/* .chartTable{
width:100%;
margin-top:10px;
}
.chartTable th,.chartTable td{
text-align: center;
padding:10px 0;
}
.chartTable th{
background-color:#D7D7D7 ;
}
td.company{text-align: left;}
td.haschild .c_title{cursor: pointer;background: url(http://note.youdao.com/yws/public/resource/a5dec28b4c472b42d7126f3a389e3f28/xmlnote/531FC34716824BE5A6ABD0451F9FDBF0/WEBRESOURCE978aa3969c38110736f0c17a178b04b6/7204) no-repeat; background-size: 20px 20px;background-position: center left;}
td.isopen .c_title{cursor: pointer;background: url(http://note.youdao.com/yws/public/resource/a5dec28b4c472b42d7126f3a389e3f28/xmlnote/531FC34716824BE5A6ABD0451F9FDBF0/WEBRESOURCEed4cebea2ccd991c3265d5a7dd90d0e3/7205) no-repeat; background-size: 20px 20px;background-position: center left;}
.c_title{padding-left:20px;margin-top:0;margin-bottom:0;}
.haschild .c_icon{height:20px;width:20px;float:left}
.level_0 .company .c_title{margin-left:0; color:red;}
.level_1{display:none;}
.level_2{display:none;}
.level_3{display:none;}
.level_1 .company .c_title{margin-left:20px;color:blue;}
.level_2 .company .c_title{margin-left:40px;color:green;}
.level_3 .company .c_title{margin-left:60px;color:#ccc;} */
</style>
<!-- <script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-24665197-4', 'auto');
ga('send', 'pageview');
</script> -->
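<!-- Hedged sketch (added for illustration, not taken from this site's original scripts): the
     publication table further down toggles rows via onclick="showInfo(1)" / onclick="showInfo(3)",
     and the stylesheet above hides the extra rows with ".paper2 { display: none; }". The real
     handler is assumed to be defined elsewhere in the full page; a minimal version of the toggle
     could look like the following. -->
<script type="text/javascript">
// Assumed minimal toggle for the "show recent selected" / "show more" buttons:
// mode 3 reveals the rows marked .paper2, any other mode hides them again.
function showInfo(mode) {
  var extraRows = document.querySelectorAll('tr.paper2');
  for (var i = 0; i < extraRows.length; i++) {
    extraRows[i].style.display = (mode === 3) ? 'table-row' : 'none';
  }
}
</script>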
</head>
<body>
<div id="container">
<div class='section_div'>
<table id="personal_info">
<tr>
<td><img class="photo_of_me" src="images/haodong/haodong10.png" width=180px
style="border: 1px solid black; float:left; margin-right:15px" /></td>
<td>
<div id="DocInfo">
<h1>Hao Dong</h1>
<!-- 董豪 助理教授 博士生导师 北京大学-计算机学院-前沿计算研究中心 <br><br> -->
董豪 北京大学 助理教授 博士生导师 国家级青年人才计划 北大博雅青年学者<br><br>
hao.dong@pku.edu.cn<br><br>
<a href="https://scholar.google.com/citations?hl=en&user=xLFL4sMAAAAJ&view_op=list_works&sortby=pubdate">Google Scholar</a> /
<!-- <a href="https://www.researchgate.net/profile/Hao_Dong35">Research Gate</a> </a> /
<a href="https://dblp.uni-trier.de/pers/hd/d/Dong_0003:Hao">DBLP</a> </a> / -->
<!-- Github: -->
<a href="https://github.com/zsdonghao">Github</a> </a>
<!-- / -->
<!-- <a href="https://github.com/hyperplane-lab">Lab</a> </a> -->
<!-- https://orcid.org/0000-0002-7984-9909 -->
<br> <br>
<!-- <li>
<span style="font-weight:bold;">Open positions available (
<a href="recurit/phd.html">Ph.D</a>,
<a href="recurit/postdoc.html">Postdoc</a>,
<a href="recurit/engineer.html">Engineers</a>,
Interns ). Contact me for details.</span>
</li> -->
</div><br>
</td>
</tr>
</table>
<h2>About Me</h2>
<!-- 董豪,北京大学计算机学院前沿计算研究中心,助理教授,博士生导师。研究方向为人工智能、机器人和计算机视觉。 -->
<!-- 董博士于2019年在英国帝国理工学院获博士学位,2012年获得英国帝国理工学院一等硕士学位、2011年获得英国中央兰开夏大学获一等学士学位。 -->
<!-- <br><br> -->
I am an Assistant Professor at the School of Computer Science, Peking University,
where I lead the <a href="#lab">PKU-Agibot Lab</a>.
<!-- I am also affiliated with Beijing Academy of Articial Intelligence (BAAI 北京智源). -->
<!-- and PengCheng Lab (鹏城实验室). -->
My current research focuses on embodied AI, large models, reinforcement learning and computer vision.
<!-- My current research focuses on several exciting areas: robot vision, robotic manipulation, embodied large models, navigation and autonomous decision making. -->
Our goal is to find the scaling law for creating cost-effective and autonomous robot systems.
<!-- <a href="https://yoshuabengio.org/2023/05/07/ai-scientists-safe-and-useful-ai/">in a strictly safe way</a>, -->
<!-- not limited to industrial applications and home assistance scenarios. -->
<!-- aiming to make AI benefits a global scale. -->
Our work has been recognized as a <a href="images/award/.png">Best Application Paper Finalist</a> at IROS.
<!-- and the <a href="images/award/2022 MyoChallenge NeurIPS.png">Winner of MyoChallenge</a> at NeurIPS. -->
<br><br>
Additionally, I am fortunate to serve as an Area Chair or Senior Program Committee member for CVPR, NeurIPS and AAAI conferences,
and as an Associate Editor of ICRA and Machine Intelligence Research. I received the
<a href="images/award/2023 MIR Oustanding Associate Editor Award-Hao Dong.pdf">MIR Outstanding Associate Editor Award</a>.
I have also been involved in open-source AI systems for a long time and have led several open-source projects, such as
<a href="https://prsorg.github.io">Polar Research Station</a>,
<a href="https://github.com/tensorlayer">TensorLayer</a>
<a href="https://github.com/tensorlayer"><img alt="GitHub Stars" style="vertical-align:middle" src="https://img.shields.io/github/stars/tensorlayer?style=social"/></a>
and <a href="https://github.com/openmlsys">OpenMLsys</a> <a href="https://github.com/openmlsys"><img alt="GitHub Stars" style="vertical-align:middle" src="https://img.shields.io/github/stars/openmlsys?style=social"/></a>
, and have won the <a href="paper/ACM MM Certification.pdf">Best Open Source Software Award</a> at ACM Multimedia, as well as the OpenI Outstanding Project Award twice.
<br>
<details><summary><b><font color="1367a7">more</font></b></summary>
Before joining PKU, I obtained my Ph.D. degree from Imperial College London
under the supervision of <a href="https://www.imperial.ac.uk/people/y.guo">Yike Guo</a>.
<!-- <a href="https://provost.hkust.edu.hk/meet_the_provost.html">Yike Guo</a> -->
<!-- He is a Ph.D. student at the Department of Computing of Imperial College London under the supervision of Prof. Yike Guo and Prof. Paul M. Matthews. -->
<!-- My research involves computer vision with the goal of recreating and interacting the world. -->
<!-- My current research involves deep learning and computer vision with the goal of reducing the data required for learning intelligent systems. -->
<!-- and has publications on ICCV, TIFS, TMI, TNSRE, ACM MM, etc.
He is an active reviewer of SIGGRAPH, TIP, TKDE, Neurocomputing, PLUS ONE, etc. -->
<!-- I am passionate about popularising artificial intelligence technologies and established <a href="https://tensorlayer.readthedocs.io">TensorLayer</a>, a deep learning and reinforcement learning library for scientists and engineers, which won the <a href="paper/ACM MM Certification.pdf">Best Open Source Software Award</a> at ACM Multimedia 2017. -->
<!-- MSc specialist degree (visual information processing) -->
Prior to my Ph.D., I received an MSc degree with distinction from Imperial,
and a first-class BEng degree from the University of Central Lancashire.
Furthermore, I founded a startup focused on AI-driven hardware between 2012 and 2015.
</details>
</div>
<hr>
<div class='section_div'>
<h2>News</h2>
<li><b><font color="red" style="background-color:yellow;" >NEW</font> </b> 近期中文报告:
<ul>
<li> <a href="https://www.bilibili.com/video/BV1zU411U7q1/?buvid=XU591F45A80C6AEDE9C3A6A9417168E61D514&from_spmid=dt.dt-video-quick-cosume.video.0&is_story_h5=false&mid=Dsm%2BIb%2FVzua%2BbxtpKXqKKw%3D%3D&plat_id=116&share_from=ugc&share_medium=android&share_plat=android&share_session_id=374ab0c8-53ad-46e8-b37d-5e1d76b8995a&share_source=WEIXIN&share_tag=s_i&spmid=united.player-video-detail.0.0×tamp=1721912552&unique_k=lFEQrR1&up_id=622988882&share_times=6&wxfid=o7omF0SUhFWtNqcVlXQdHtNhdpSo&vd_source=9486a6185d2390720d5d92dbddc724cb"> 《具身智能关键技术研究:操纵、决策、导航》(2024.6)</a></li>
<li> <a href="https://www.bilibili.com/video/BV1ggWreDExa/?buvid=Z645F66B7DAB64154BEEB8E2B959A109DBE7&is_story_h5=false&mid=CYNhxo%2BNiGPK%2F8K%2BHj0MJQ%3D%3D&plat_id=114&share_from=ugc&share_medium=iphone&share_plat=ios&share_session_id=7D66E54C-2086-49A8-B156-ABB763B0556E&share_source=WEIXIN&share_tag=s_i×tamp=1724593839&unique_k=uEqX0Ix&up_id=622988882&vd_source=9486a6185d2390720d5d92dbddc724cb">《具身智能技术趋势分析》(2024.8)</a></li>
</ul>
<li>[2024/10] <b><font color="red" style="background-color:yellow;" >NEW</font></b> <a href="https://arxiv.org/pdf/2403.18195">SCANet</a> is recognized as a <font color="red">Best Application Paper Finalist</font> at IROS 2024.
<li>[2024/09] Two papers get accepted to NeurIPS 2024</li>
<li>[2024/09] The world's first general navigation large model that unifies visual-language navigation, object navigation and demand-driven navigation in a single framework: <a href="https://sites.google.com/view/instructnav">InstructNav</a></li>
<li>[2024/09] Three papers get accepted to CoRL 2024: <a href="https://sites.google.com/view/instructnav">Generic Instruction Navigation</a>, <a href="https://sites.google.com/view/aic-mllm">Interactive Correction for Manipulation</a>, <a href="https://arxiv.org/pdf/2406.07549">Articulation-Aware VLM</a></li>
<li>[2024/09] One paper gets accepted to <a href="https://www.nature.com/articles/s42256-024-00879-7#:~:text=In%20this%20section,%20we%20introduce%20the%20results%20of%20our%20method">Nature Machine Intelligence</a></li>
<li>[2024/09] One paper gets accepted to RAL</li>
<li>[2024/08] Call for Papers: <a href="https://onlinelibrary.wiley.com/page/journal/15564967/homepage/call-for-papers/si-2024-000757">Special Issues on Embodied AI in Journal of Field Robotics</a></li>
<li>[2024/07] Two papers get accepted to ECCV 2024: <a href="https://jiyao06.github.io/Omni6DPose/">Omni6DPose</a>, <a href="https://arxiv.org/pdf/2407.15771v1">Grasping</a> </li>
<details><summary><b><font color="1367a7"> show more </font></b></summary>
<li>[2024/06] Three papers get accepted to IROS 2024: <a href="https://air-discover.github.io/PreAfford/">Pre-grasping</a>, <a href="https://arxiv.org/pdf/2403.11289">ManipVQA</a>, <a href="https://github.com/Yaser-wyx/SCANet">Lego Assembly</a> </li>
<li>[2024/06] CVPR 2024 Embodied AI Workshop <a href="https://prsorg.github.io">PRS Challenge: Human-centered In-building Embodied Delivery</a> </li>
<li>[2024/05] Two papers get accepted to RSS 2024 </li>
<li>[2024/04] Our RGB-based object grasping paper is accepted to RAL 2024 </li>
<li>[2024/02] Three papers get accepted to CVPR 2024 </li>
<li>[2024/01] Five papers get accepted to ICRA 2024 </li>
<li>[2024/01] I received the <a href="images/award/2023 MIR Oustanding Associate Editor Award-Hao Dong.pdf">MIR Outstanding Associate Editor Award</a> </li>
<li>[2024/01] Two papers get accepted to ICLR 2024:
<a href="https://helloqxwang.github.io/SparseDFF/">SparseDFF</a> and <a href="https://arxiv.org/abs/2305.03048">PerSAM</a>
</li>
<li>[2023/12] One paper gets accepted to PAMI and two papers for AAAI 2024
<a href="https://bi-dexhands.ai">Bi-DexHands</a>, <a href="https://arxiv.org/pdf/2305.16318.pdf">MUTR</a> and <a href="https://arxiv.org/pdf/2312.12340.pdf">FractureAssembly</a>
</li>
<li>[2023/09] Five NeurIPS 2023 submissions are all accepted:
<a href="https://sites.google.com/view/demand-driven-navigation">Demand-driven Navigation</a>,
<a href="https://sites.google.com/view/genpose">GenPose</a>,
<a href="https://sites.google.com/view/graspgf">GraspGF</a>,
<a href="https://chengkaiacademycity.github.io/EnvAwareAfford/">EnvAwareAfford</a> and
<a href="https://tritiumr.github.io/Where2Explore/">Where2Explore</a></li>
<li>[2023/09] I will serve as an associate editor of ICRA </li>
<li>[2023/08] One paper gets accepted to SIGGRAPH Asia, and two papers for BMVC </li>
<li>[2023/07] Two papers get accepted to ICCV 2023:
<a href="https://hyperplane-lab.github.io/DeformableAffordance/">DefoAfford</a> and
<a href="https://crtie.github.io/SE-3-part-assembly/">3D Shape Assembly</a>
</li>
<li>[2023/06] I will serve as an AC of CVPR 2024 </li>
<li>[2023/06] I will serve as a SPC of AAAI 2024 </li>
<li>[2023/04] Our <a href="https://sites.google.com/view/sasavan/">visual-audio navigation</a> gets accepted to RAL</li>
<li>[2023/03] I will serve as an AC of NeurIPS 2023 </li>
<li>[2023/02] Three papers get accepted to CVPR 2023</li>
<!-- <li>[2023/02] Our <a href="http://www.tensorlayerx.com/index_en.html?chlang=&langid=2">TensorLayerX</a> won the OpenI Outstanding Open Source Project Award 2022</li> -->
<!-- <li>[2023/01] Three papers get accepted to ICRA, ICLR and AAAI respectively</li> -->
<!-- <li>[2022/12] We won the Dual Object Manipulation Track of MyoChallenge @ NeurIPS 2022</li> -->
<!-- <li>[2022/11] Our metasurface indoor robotic paradigm gets accepted to National Science Review</li> -->
<!-- <li>[2022/10] I will serve as a co-chair of Learning Robot Manipulation Forum @ PRCV 2022</li> -->
<!-- <li>[2022/09] Two papers get accepted to NeurIPS 2022</li> -->
<!-- <li>[2022/09] I will serve as an AC of CVPR 2023 </li> -->
<!-- <li>[2022/07] I will serve as a SPC of AAAI 2023 </li> -->
<!-- <li>[2022/07] Two papers get accepted to ECCV 2022 </li> -->
<!-- <li>[2022/07] I will serve as a co-chair for <a href="https://neurips-hill.github.io">Human in the Loop Learning (HiLL) Workshop</a> @ NeurIPS 2022 </li> -->
<!-- <li>[2022/06] I serve as an associate editor of Machine Intelligence Research </li> -->
<!-- <li>[2022/06] One paper gets accepted to IROS 2022 </li> -->
...
</details>
<!-- <li>[2022/05] Our open-source ML system book is released @ <a href="https://openmlsys.github.io">OpenMLsys</a> </li> -->
<!-- <li>[2022/05] I will serve as an organizer of <a href="http://saferl.online/2022">SafeRL Workshop</a> @ <a href="https://www.mfi2022.com">MFI</a> 2022 </li> -->
<!-- <li>[12/2021] 科技部重大项目牵头人 </li> -->
<!-- <li>[12/2020] Panel discussion at WAVE Summit</li> -->
<!-- <li>[11/2020] 我们的《深度强化学习:基础、研究与应用》中文书将在2021年夏天出版,敬请关注</li> -->
<!-- <li>[11/2020]「Talk」百度 《开源开放平台建设》</li> -->
<!-- <li>[08/2020] We won the <a href="paper/2020power_gride_winner_diploma.jpg">CityLearn Challenge 2020</a>, reducing 13% cost of building energy</li> -->
<!-- <li>[08/2020] TensorLayer 3.0.0 will supports multiple backends, such as TensorFlow, MindSpore and more, supporting GPU and Huawei-Ascend. Stay tuned!</li> -->
<!-- <li>[07/2020] Our <a href="https://deepreinforcementlearningbook.org/index.html#mailing-list">DRL book</a> is published! </li> -->
<!-- <li>[07/2020]「Talk」中科院文献情报中心 《信息科学技术的开源开放与知识传播》</li> -->
<!-- <li>[06/2020]「Talk」人工智能大会 - 开发者日 机器之心 WAIC《人工智能与开源开放》</li> -->
<!-- <li>[06/2020]「Talk」Tsinghua University - From Deep Generation to Creation -->
<!-- <li>[08/2019] I graduated from Imperial and joined PKU. </li> -->
<!-- <li>[06/2019] Release <a href="https://github.com/tensorlayer/tensorlayer/tree/master/examples/reinforcement_learning">RL Model Zoo</a> for teaching and research. </li> -->
<!-- <li>[05/2019] Release <a href="https://github.com/tensorlayer/tensorlayer/releases/tag/2.0.0">TensorLayer 2.0</a> ! A BIG Updated!</li> -->
<!-- <li>[05/2019]「Talk」GAMES 2019, Introduction of Generative Adversarial Networks. </li> -->
<!-- <li>[04/2019]「Talk」Invited talk, <a href="https://mp.weixin.qq.com/s/gXzayHO3Wtz4OJ61saF90A">"Deep Learning & Data Efficiency"</a> by CFCS, Peking University. </li> -->
<!-- <li>[12/2018] TensorLayer will give a demo of "Learning to Dance via Machine Learning" at NeurIPS. Montréal, Dec 4 2018 <a href="https://NeurIPS.cc/Conferences/2018/Schedule?showEvent=12183">(click)</a> </li> -->
<!-- <li>[12/2018]「Talk」TensorLayer give a talk at <a href="https://devfest.gdg.london">Google Developer Groups (GDG) DevFest</a>. London, Dec 1 2018 </li> -->
<!-- <li>[03/2018] Teaching Assistant of "Advanced Machine Learning" @ Imperial College</li> -->
<!-- <li>[02/2018] I gave a talk at <a href="https://github.com/tensorlayer/tensorlayer-chinese/blob/master/docs/LPN_AI_symposium_handbook_poster.pdf">London PhD Network AI Symposium</a> with DeepMind, UCL, and Francis Crick Institute (<a href="https://github.com/tensorlayer/tensorlayer-chinese/blob/master/docs/TensorLayer_London_PhD_Network_20180212.pdf">slides</a>) </li> -->
<!-- <li>[01/2018] Published my 1st <a href="http://www.broadview.com.cn/book/5059">Chinese Deep Learning Book</a> </li> -->
<!-- <li>[12/2017] Interviewed by ClusterOne - "Humans of AI" series: <a href="https://clusterone.com/blog/hao-dong">TensorLayer and the Chinese Deep Learning Community</a> </li> -->
<!-- <li>[10/2017] We won the <a href="paper/ACM MM Certification.pdf">Best Open Source Software Award</a> @ACM MM 2017 </a> </li> -->
<!-- <li>[07/2017] TensorLayer is accepted by ACM Multimedia'17. It has quickly gained over 2000+ stars on <a href="https://github.com/zsdonghao/tensorlayer">Github</a>! </li> -->
<!-- <li>[06/2017] New ICCV Paper : <a href="https://arxiv.org/abs/1707.06873">Semantic Image Synthesis via Adversarial Learning</a> </li> -->
<!-- <li>[05/2016] Deputy leader of the machine learning group in <a href="https://www.imperial.ac.uk/data-science">Data Science Institute</a> at Imperial College </li> -->
<!-- <li>[04/2016]「Talk」Invited talk, "Introduction of Artificial Neural Network" at Imperial College </li> -->
<!-- <li>[09/2016] TensorLayer is released. It has quickly gained over 2000+ stars on <a href="https://github.com/zsdonghao/tensorlayer">Github</a>! </li> -->
</div>
<hr>
<!-- <ul>
<li>
<span style="font-weight:bold;">Open positions available (
<a href="recurit/phd.html">Ph.D</a>,
<a href="recurit/postdoc.html">Post Doc</a>,
<a href="recurit/engineer.html">Engineers</a>,
Interns ). Contact me for details.</span>
</li>
</ul> -->
<!-- <hr> -->
<!-- <div class='section_div'>
<h2>Postdocs and Students </h2>
<li>PhD: <a href="https://warshallrho.github.io">Ruihai Wu</a>, <a href="https://xxx.github.io">Mingdong Wu</a><br></li>
<li>Postdoc: <a href="https://xxx.github.io">Lin Dong</a><br></li>
</div>
<hr> -->
<div id="lab" class='section_div' id='ResearchGroup'>
<h2>PKU-Agibot Lab</h2>
<!-- We study how to improve the learning ability of artificial intelligence systems. -->
<!-- We are especially interested in <b>vision</b> and <b>robotics</b>, -->
<!-- recreating and interacting the world. -->
<!-- the current topic is generalisable and autonomous robot learning, with the goal to improve the reliability and functionality of AI systems. -->
<!-- the current topic is self-supervised and model-based robot learning, with the goal to improve the learning ability of artificial intelligence systems. -->
<!--include <b>3D vision</b>, <b>generative models</b> and <b>robotics</b>.-->
<!-- We have broader interests in medical data analysis and computer graphics. -->
<!-- <br><br> -->
<!-- We also enjoy developing and maintaining open source projects, e.g., <a href="https://github.com/tensorlayer">TensorLayer</a> and <a href="https://github.com/openmlsys">OpenMLsys</a>, -->
<!-- a deep learning and reinforcement learning library for scientists and engineers, which -->
<!-- and won the <a href="paper/ACM MM Certification.pdf">Best Open Source Software Award</a> at ACM Multimedia 2017. -->
<!-- We are now developing <b>new projects</b> for AI, please contact us for the detail. -->
<!-- <br><br> -->
<!-- Our lab, affiliated with the <a href="http://cfcs.pku.edu.cn">CFCS</a> and the <a -->
<!-- href="https://cs.pku.edu.cn/English/Home.htm">School of CS</a> at PKU, -->
Our lab welcomes research interns, master's students, PhD candidates and postdocs. The current research interests include: <br>
<!-- Our lab's name, 'Lab', reflects our commitment to exploring diverse and evolving frontiers in AI and robotics, allowing our research to adapt and grow as the field evolves. -->
<li>grasping and manipulation<br>
<li>task planning<br>
<li>navigation<br>
<li>safety and interpretability in robotics<br>
<!-- <br> -->
<!-- <br> -->
<!-- I also have the leading of the Embodied AI center at the Beijing Academy of Artificial Intelligence (<a href="https://baai.ac.cn/english.html">BAAI</a> 北京智源), where we are also actively seeking for research scientists, engineers, and interns. -->
<!-- Additionally, I am also honored to be a member of PengCheng Lab (<a href="https://pcl.ac.cn">PCL</a> 鹏城实验室), where I contribute to a variety of exciting open-source projects. -->
For more information, please contact Hao Dong at <i>hao.dong (a) pku.edu.cn</i>
<br>
<!-- <details><summary><b><font color="1367a7"> lab members</font></b></summary>
<table class='personnel'>
<tr>
<td rowspan="2" width=350px>
<b>PhD Students</b><br>
<a href="http://warshallrho.github.io/">Ruihai Wu</a> <font size="2" color='grey'> <I>2020</I></font> <br>
<a href="https://aaronanima.github.io/">Mingdong Wu </a> <font size="2" color='grey'> <I>2021</I></font> <br>
<a href="https://tianhaowuhz.github.io">Tianhao Wu</a> <font size="2" color='grey'> <I>2021</I></font> <br>
<a href="https://sxy7147.github.io">Yan Shen</a> <font size="2" color='grey'> <I>2022</I></font> <br>
<a href="https://whcpumpkin.github.io">Hongcheng Wang</a> <font size="2" color='grey'> <I>2022</I></font> <br>
<a href="https://jiyao06.github.io">Jiyao Zhang </a> <font size="2" color='grey'> <I>2023</I></font> <br>
<a href="https://clorislili.github.io/clorisLi">Xiaoqi Li </a> <font size="2" color='grey'> <I>2023</I></font> <br>
<a href="https://">Yang Tian </a> <font size="2" color='grey'> <I>2023</I></font> <br>
<a href="https://lyx0501.github.io">Yuxing Long </a> <font size="2" color='grey'> <I>2024</I></font> <br>
<a href="https://yuanfei-wang.github.io">Yuanfei Wang </a> <font size="2" color='grey'> <I>2024</I></font> <br>
</td>
<td width=320px>
<b>MSc Students</b><br>
<a href="https://">Yunchong Gan </a> <font size="2" color='grey'> <I>2021</I></font> <br>
<a href="https://">Taewhan Kim </a> <font size="2" color='grey'> <I>2022</I></font> <br>
<a href="https://">Zichen Zhang </a> <font size="2" color='grey'> <I>2023</I></font> <br>
<a href="https://">Fei Hu </a> <font size="2" color='grey'> <I>2023</I></font> <br>
<a href="https://">Hisham Barakat </a> <font size="2" color='grey'> <I>2023</I></font> <br>
<a href="https://">Yaroslav Ponomarenko </a> <font size="2" color='grey'> <I>2023</I></font> <br>
</td>
</tr>
</table> -->
<!-- <tr>
<td>
<b>Affiliates and Collaborators</b><br>
<a href="http://people.csail.mit.edu/jahanian/">Ali Jahanian</a>, <a href="https://people.eecs.berkeley.edu/~shelhamer/">Evan Shelhamer</a>,
<a href="https://www.alexandonian.com/">Alex Andonian</a>, Kexin Yi, <a href="https://people.csail.mit.edu/xavierpuig/">Xavier Puig</a>,
<a href="http://www.mit.edu/~lishuang/">Shuang Li</a>, <a href="https://people.csail.mit.edu/davidbau/home/">David Bau</a>,
<a href="https://ps.is.mpg.de/person/jwulff">Jonas Wulff</a>, <a href="http://people.csail.mit.edu/ganchuang/">Chuang Gan</a>,
<a href="http://www.sabrina-osmany.com/about">Sabrina Osmany</a>
</td>
<td>
<b>Former Members and Visitors</b><br>
<a href="http://kvfrans.com/">Kevin Frans</a> (UROP 2018-2020), <a href="https://yilundu.github.io/">Yilun Du</a> (UROP 2019), Zhongxia Yan (Rotation 2019)
</td>
</tr> -->
<!-- <table class='personnel'>
<td>
<b>Former Interns</b><br>
Guanqi Zhan <font size="2" color='grey'> <I>BS@PKU -> PhD@Oxford</I></font>
Mingxin Yu <font size="2" color='grey'> <I>BS@PKU -> PhD@MIT</I></font>
Junning Shao <font size="2" color='grey'> <I>BS@PKU -> PhD@THU</I></font>
Zihan Ding <font size="2" color='grey'> <I>MSc@Imperial -> PhD@Princeton</I></font> <br>
Jiahao Huang <font size="2" color='grey'> <I>BS@BIT -> PhD@Imperial</I></font>
Jialei Huang <font size="2" color='grey'> <I>BS@PKU -> PhD@THU</I></font>
Haoqi Yuan <font size="2" color='grey'> <I>BS@PKU -> PhD@PKU</I></font>
Andrew Zhao <font size="2" color='grey'> <I>BS@UBC -> PhD@THU</I></font> <br>
Yihao Zhao <font size="2" color='grey'> <I>BS@PKU -> PhD@PKU</I></font>
Bingchan Zhao <font size="2" color='grey'> <I>BS@PKU -> PhD@PKU</I></font>
Lan Lyu <font size="2" color='grey'> <I>BS@PKU -> MSc@CMU</I></font>
Minghang Zheng <font size="2" color='grey'> <I>BS@PKU -> PhD@PKU</I></font> <br>
Zizheng Guo <font size="2" color='grey'> <I>BS@PKU -> PhD@PKU</I></font>
Jie Ren <font size="2" color='grey'> <I>BS@XDU -> PhD@Edin.</I></font>
Yian Wang <font size="2" color='grey'> <I>BS@PKU -> PhD@UMass</I></font>
Kejian Shi <font size="2" color='grey'> <I>MSc@Imperial -> PhD@CUHK</I></font> <br>
Lin Dong <font size="2" color='grey'> <I>PostDoc@PKU -> Associate Prof in AI@CUPES</I></font>
<br> <font color="#ff0000">招收校内外实习生 (gap year, 研究生等),对博后、RA和外地访问学生提供充足补贴</font>
</td>
</table> -->
<!-- </details> -->
</div>
<hr>
<!--
<div class='section_div'>
<h2>News</h2>
<p>▻ New blog post on our work on <a href="https://blog.openai.com/evolved-policy-gradients/">Evolved Policy Gradients</a>.<br>
▻ I am co-organizing a <a href="https://sites.google.com/view/cvpr2018tutorialongans/">tutorial on GANs at CVPR 2018</a>.</p>
<p>I recently co-organised the <a href="http://vui.eecs.berkeley.edu/">2nd Workshop on Visual Understanding for Interaction</a> at CVPR 2017. Talk slides coming soon!</p>
</div>
<hr>
-->
<div class='section_div'>
<h2>Services</h2>
<li> Area Chair: NeurIPS (2023, 2024), CVPR (2023, 2024) </li>
<li> Senior Program Committee: AAAI (2023, 2024) </li>
<li> Associate Editor: ICRA, Machine Intelligence Research </li>
<!-- <li class="service1">Open-Source Organisation: <a href="https://github.com/tensorlayer">TensorLayer</a> and <a href="https://github.com/openmlsys">OpenMLsys</a> communities</li> -->
<!-- <li class="service2">Journal Organisation
<ul>
<li> Associate Editor of Machine Intelligence Research
</ul>
</li> -->
<!-- <details><summary><b><font color="1367a7"> show more</font></b></summary> -->
<!-- <li> Conference & Workshop Organisation
<ul>
<li> Co-chair of Learning Robot Manipulation Forum @ PRCV 2022
<li> Co-chair of <a href="https://neurips-hill.github.io">Human in the Loop Learning (HiLL) Workshop</a> @ NeurIPS 2022
<li> Organiser of <a href="http://saferl.online/2022">International Workshop on Safe Reinforcement Learning Theory and its Applications</a> @ IEEE International Conference on Multisensor Fusion and Integration (MFI) 2022
<li> Co-chair of <a href="https://bda4s.github.io"> International Workshop on Big Data Analytics for Sustainability</a> @ IEEE International Conference Big Data 2022
<li> Technical Program Committee of <a href="http://www.ieee-cybermatics.org/2022/dependsys/index.html"> IEEE DependSys 2022</a>
</ul>
</li> -->
<!-- <li >Organizer of the 1st International Workshop on Safe Reinforcement Learning Theory and its Applications at IEEE International Conference on Multi-sensor Fusion and Integration (MFI) 2022</li> -->
<!-- <li>Co-chair of the 2nd International Workshop on Big Data Analytics for Sustainability at IEEE International Conference Big Data 2022</li> -->
<!-- <li> Peer-reviewed Journals and Conferences
<ul>
<li> Associate Editor, ICRA (24) </li>
<li> Area Chair, NeurIPS (23) </li>
<li> Area Chair, CVPR (23, 24) </li>
<li> Senior Program Committee, AAAI (23, 24) </li>
<li> Reviewer, ICLR (23) </li>
<li> Reviewer, NeurIPS Dataset and Benchmark (22) </li>
<li> Reviewer, NeurIPS (21,22) </li>
<li> Reviewer, CoRL (20,21,22) </li>
<li> Reviewer, IROS (20,21,22) </li>
<li> Reviewer, CVPR (21,22) </li>
<li> Program Committee, AAAI (22) </li>
<li> Reviewer, ICRA (22) </li>
<li> Reviewer, ICCV (21) </li>
<li> Reviewer, SIGGRAPH Asia (20) </li>
<li> Reviewer, MICCAI (20) </li>
<li> Reviewer, China CAD&CG (20) </li>
<li> Reviewer, EuroGRAPHICS (20) </li>
<li> Reviewer, PAMI (19) </li>
<li> Reviewer, SIGGRAPH (19) </li>
<li> Reviewer, TIP (18) </li>
<li> Reviewer, TKDE (18) </li>
<li> Reviewer, PLUS ONE (18) </li>
<li> Reviewer, Neurocomputing (17) </li>
</ul>
</li> -->
<!-- </ul> -->
<!-- </details> -->
<!-- <li>Program Commitee of IEEE CBMS</li> -->
<!-- <a class="btn btn_one" onclick="servicesShow()">[ <span> <b>show more</b></span> ]</a> -->
</div>
<hr>
<div class='section_div'>
<h2>Courses</h2>
<li><a href="courses/index_foundamentals_of_AI.html">Foundamentals of AI</a> (Spring Term 2023 - )<br>
<li><a href="courses/index_introduction_to_computing_A.html">Introduction to Computing (A)</a> (Fall Term 2022 - )<br></li>
</li>
<details><summary><b><font color="1367a7"> previous courses</font></b></summary>
<li><a href="https://deep-generative-models.github.io">Deep Generative Models</a> (Spring Term 2020 - 2022)<br>
<li><a href="courses/index_introduction_to_computing_B.html">Introduction to Computing (B)</a> (Fall Term 2020 - 2021)<br></li>
<li><a href="http://elective.pku.edu.cn/elective2008/edu/pku/stu/elective/controller/courseDetail/getCourseDetail.do?kclx=BK&course_seq_no=BZ1920104833460_15539">Study and Practice on Topics of Frontier Computing (I)</a> (Autumn Term 2019)<br></li>
<li><a href="courses/deep-learning/2019_introduction_deep_learning.html">Introduction to Deep Learning (Turing Class)</a> (Summer Term 2019)<br></li>
</details>
</div>
<hr>
<!-- <div class='section_div'>
<h2>Team</h2>
<li><a href="https://warshallrho.github.io">Ruihai Wu</a> Ph.D. 2020~Now<br></li>
<li><a href="https://github.com/zjduan">Zhijian Duan</a> Ph.D. 2020~Now<br></li>
</div>
<hr> -->
<!-- <div class='section_div'>
<h2>Team</h2>
<a href="https://warshallrho.github.io">Ruihai Wu</a> (Ph.D 2019)<br>
<a href="https://warshallrho.github.io">Jiabin Liu</a> (Post Doc 2019)<br>
</div>
<hr> -->
<div class='section_div'>
<!--
<h2>Publications</h2>
For all publications please check my <a href="https://scholar.google.co.uk/citations?hl=en&user=xLFL4sMAAAAJ">Google Scholar</a> and
<a href="https://www.researchgate.net/profile/Hao_Dong35">Research Gate</a> </a>.
<br><br> -->
<table class="pub_table">
<!--
<tr><td class="sub_heading">Selected Papers<hr></td></tr>
<tr>
<td class="pub_td1"><div class="teaser_img_div"><a href="https://arxiv.org/abs/1707.06873"><img class="teaser_img" src='images/paper/2017iccv_sisgan.png'/></a></div></td>
<td class="pub_td2"><b>SisGAN: Semantic Image Synthesis via Adversarial Learning</b> <br> <i> ---Image Manipulation with Natural Language</i> <br>Hao Dong, Simiao Yu, Chao Wu, Yike Guo<br><i>International Conference on Computer Vision (ICCV) 2017</i><br>[<a href="https://arxiv.org/abs/1707.06873">Paper</a>]
</td></tr>
<tr>
<td class="pub_td1"><div class="teaser_img_div"><a href="http://github.com/tensorlayer/tensorlayer/"><img class="teaser_img" src='images/paper/2017acmmm_tensorlayer.png'/></a></div></td>
<td class="pub_td2"><b>TensorLayer: A Versatile Library for Efficient Deep Learning Development</b><br>Hao Dong, Akara Supratak, Luo Mai, Fangde Liu, Axel Oehmichen, Simiao Yu, Yike Guo<br><i>ACM Multimedia (MM) 2017 (Winner of the Best Open Source Software Award)</i><br>[<a href="https://arxiv.org/abs/1707.08551">Paper</a>] [<a href="http://github.com/tensorlayer/tensorlayer/">Code</a>] [<a href="http://github.com/tensorlayer">Organisation</a>] [<a href="http://tensorlayer.readthedocs.io">Documentation</a>]
</td></tr>
<tr>
<td class="pub_td1"><div class="teaser_img_div"><a href="https://arxiv.org/abs/1610.06421"><img class="teaser_img" src='images/paper/2017tnsre_mnn.png'/></a></div></td>
<td class="pub_td2"><b>Mixed Neural Network Approach for Temporal Sleep Stage Classification</b><br>Hao Dong, Akara Supratak, Wei Pan, Chao Wu, Paul M Matthews, Yike Guo<br><i>IEEE Trans. on Neural Systems and Rehabilitation Eng. (TNSRE) 2017</i><br>[<a href="https://arxiv.org/abs/1610.06421">Paper</a>]
</td></tr>
<tr>
<td class="pub_td1"><div class="teaser_img_div"><a href="https://arxiv.org/abs/1711.07520"><img class="teaser_img" src='images/paper/2017tifs_drop.png'/></a></div></td>
<td class="pub_td2"><b>Dropping Activation Outputs with Localized First-layer Deep Network for Enhancing User Privacy and Data Security</b><br>Hao Dong, Chao Wu, Wei Zhen, Yike Guo<br><i>IEEE Trans. on Inform. Forensics and Security (TIFS) 2018</i><br>[<a href="https://arxiv.org/abs/1711.07520">Paper</a>]
</td></tr> -->
<tr>
<td class="sub_heading">Books
<hr>
</td>
</tr>
<tr>
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://deepreinforcementlearningbook.org"><img class="teaser_img"
src='images/paper/2020drl_book_cover_v3-min.png' /></a></div>
</td>
<td class="pub_td2"><b>Deep Reinforcement Learning: Fundamentals, Research and Applications</b>
<br>Hao Dong, Zihan Ding, Shanghang Zhang <i>Eds.</i> <br><i>Springer Nature 2020 ISBN 978-981-15-4094-3</i>
<!--, 1st ed.</i>-->
<br><i><font color="#1367a7">--- A Selection of the High-impact Publications in CS by Chinese Researchers from Springer Nature</font></i>
<details><summary><b><font color="1367a7">Chinese version</font></b></summary>
<!-- <p> - 测试 测试测试</p>
<pre><code> title,value,callBack可以缺省 </code> </pre> -->
<b>深度强化学习:基础、研究与应用</b> 董豪、丁子涵、仉尚航 等著(简体中文译本 Simplified Chinese)<br> <i>电子工业出版社 2021 ISBN
978-7-121-41188-5</i>
<br><b>新一代AI霸主 - 深度強化學習</b> 董豪、丁子涵、仉尚航 等著(繁體中文譯本 Traditional Chinese)<br> <i>深智數位 2022 ISBN
978-986-0776-82-9</i>
</details>
[<a href="https://deepreinforcementlearningbook.org">Free Open Source Book</a>] [<a
href="https://link.springer.com/book/10.1007%2F978-981-15-4095-0#editorsandaffiliations">Springer <button type="xx">250k Downloads</button></a>]
[<a href="http://www.broadview.com.cn/book/6544">Broadview</a>] [<a
href="https://deepmind.com.tw">繁体版本</a>] [<a
href="https://search.jd.com/Search?keyword=深度强化学习%20董豪&enc=utf-8&suggest=1.def.0.base&wq=深度强化学习:基&pvid=3481b24e95ae4b86ba80128820fd563c">京东</a>]
</td>
</tr>
<tr>
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://openmlsys.github.io"><img class="teaser_img"
src='images/paper/2023openmlsys.png' /></a></div>
</td>
<td class="pub_td2"><b>Machine Learning System: Design and Implementation</b><br>Luo Mai, Hao
Dong <i>Eds.</i> <i>Springer Nature 2024 coming soon</i> <br>
<details><summary><b><font color="1367a7">Chinese version</font></b></summary>
<b>机器学习系统:设计与实现 </b> 麦络、董豪 <i>等著</i> <br>
<i>清华大学出版社 Tsinghua University Press 2023 ISBN 978-7-302-63007-4</i>
</details>
<!-- <br> -->
[<a href="https://github.com/openmlsys">OpenMLsys Github</a><a href="https://github.com/openmlsys"><img alt="GitHub Stars" style="vertical-align:middle" src="https://img.shields.io/github/stars/openmlsys?style=social"/></a>]
[<a href="https://openmlsys.github.io/html-en/">English Open Source Book (coming soon)</a>]
[<a href="https://openmlsys.github.io">Chinese Open Source Book</a>]
[<a href="https://search.jd.com/Search?keyword=机器学习系统:设计与实现&enc=utf-8&wq=机器学习系统:设计与实现&pvid=526e181cdf58456d842f3580fff9f1d5">京东</a>]
</td>
</tr>
<!-- <div id="MoreBook"> -->
<!-- <table id='table1'> -->
<!-- <tr><td class="sub_heading"><hr></td> -->
<!-- <tr class="level_0"><td class="company haschild"><p class="c_title" style="color:#1367a7;"><b>Show more ...</b></p></td> -->
<!-- <tr class="level_1">
<td class="pub_td1">
<div class="teaser_img_div"><a href="http://www.broadview.com.cn/book/5059"><img class="teaser_img"
src='images/paper/2018phei_tensorlayer_book3-min.png' /></a></div>
</td>
<td class="pub_td2"><b> 深度学习:一起玩转TensorLayer(Deep Learning using TensorLayer)</b><br>Hao Dong, Yike Guo,
Guang Yang et al<br><i>电子工业出版社 Publishing House of Electronics Industry 2018 ISBN:
9787121326226</i><br>[<a
href="https://www.amazon.com/深度学习-丄1�71ᅣ1�771ᅣ1�71ᅣ1�777起玩转TensorLayer-董豪-筄1�71ᅣ1�771ᅣ1�71ᅣ1�777/dp/B078YDZTCY/ref=sr_1_2?keywords=tensorlayer&qid=1570048255&s=gateway&sr=8-2">Amazon</a>]
[<a
href="https://search.jd.com/Search?keyword=tensorlayer&enc=utf-8&wq=tensorl&pvid=555e73b10c134c339afddc63c7ecdd8a">京东</a>]
[<a href="http://www.broadview.com.cn/book/5059">Broadview</a>] [<a
href="https://github.com/tensorlayer/chinese-book">Code</a>]
[<a href="http://github.com/tensorlayer">Organisation</a>] [<a
href="http://tensorlayer.readthedocs.io">Documentation</a>]
</td>
</tr> -->
<!-- <tr class="level_1">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://link.springer.com/chapter/10.1007/978-3-319-50478-0_8"><img
class="teaser_img" src='images/paper/2016springer_survey_v2-min.png' /></a></div>
</td>
<td class="pub_td2"><b>Chapter: Survey on Feature Extraction and Applications of Biosignals</b><br>Akara
Supratak, Chao Wu, Hao Dong, Kai Sun, Yike Guo<br><i>Machine Learning for Health Informatics, Springer,
Page 161-182 2016</i><br>[<a
href="https://link.springer.com/chapter/10.1007/978-3-319-50478-0_8">Springer</a>]
</td>
</tr>
-->
<!-- <tr>
<td></td>
<td><a class="btn btn_two" onclick="trShow()">[ <span><b>show more</b></span> ]</a></td>
</tr> -->
<!-- </tr> -->
<!-- </table> -->
<!-- </div> -->
<!-- <button onclick=document.all["morebook"].style.display="none">show less</button>
<button onclick=document.all["morebook"].style.display="block">show more</button> -->
<!-- Recent Papers -->
<tr>
<td class="sub_heading">Papers
<hr>
</td>
<td>
(
<a class="all_Btn" onclick="showInfo(1)"> <span><b>show recent selected</b></span> </a> /
<!-- <a class="all_Btn" onclick="showInfo(2)"> <span>Before 2020</span> </a> / -->
<a class="all_Btn" onclick="showInfo(3)"><span><b>show more</b></span></a>
)
</td>
<!-- <tr>
<td class="pub_td1"><div class="teaser_img_div"><a href="https://arxiv.org/pdf/20xx.xxx.pdf"><img class="teaser_img" src='images/paper/2021pmoe-min.png'/></a></div></td>
<td class="pub_td2"><b>Probabilistic Mixture-of-Experts for Efficient Deep Reinforcement Learning</b><br>Jie Ren*, Yewen Li*, Zihan Ding, Wei Pan, Hao Dong<br><i>arXiv 20xx.xxx</i><br>[<a href="https://arxiv.org/pdf/20xxxx.pdf">Paper</a>]
</td></tr> -->
<tr class="paper1">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://arxiv.org/pdf/2409.17549"><img class="teaser_img"
src='images/paper/2024-3DTacDex-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>Canonical Representation and Force-Based Pretraining of 3D Tactile for Dexterous Visuo-Tactile Policy Learning</b>
<br>Tianhao Wu, Jinzhou Li, Jiyao Zhang, Mingdong Wu, Hao Dong<br>
<i>arXiv 2024</i><br>
[<a href="https://arxiv.org/pdf/2409.17549">Paper</a>]
[<a href="https://3dtacdex.github.io">Webpage</a>]
</td>
</tr>
<tr class="paper2">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://arxiv.org/pdf/2406.13642"><img class="teaser_img"
src='images/paper/2024SpatialBot-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>SpatialBot: Precise Spatial Understanding with Vision Language Models</b>
<br>Wenxiao Cai, Yaroslav Ponomarenko, Jianhao Yuan, Xiaoqi Li, Wankou Yang, Hao Dong, Bo Zhao<br>
<i>arXiv 2024</i><br>
[<a href="https://arxiv.org/pdf/2406.13642">Paper</a>]
[<a href="https://github.com/BAAI-DCAI/SpatialBot">Code</a>]
[<a href="https://mp.weixin.qq.com/s/X1iqkkEMsop9DGCY08AfCw">机器之心</a>]
</td>
</tr>
<tr class="paper2">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://www.nature.com/articles/s42256-024-00879-7#:~:text=In%20this%20section,%20we%20introduce%20the%20results%20of%20our%20method"><img class="teaser_img"
src='images/paper/2024NMI-RL.jpg' /></a></div>
</td>
<td class="pub_td2"><b>Efficient and Scalable Reinforcement Learning for Large-scale Network Control</b>
<br>Chengdong Ma, Aming Li, Yali Du, Hao Dong, Yaodong Yang<br>
<i>Nature Machine Intelligence (NMI) 2024</i><br>
[<a href="https://www.nature.com/articles/s42256-024-00879-7#:~:text=In%20this%20section,%20we%20introduce%20the%20results%20of%20our%20method">Paper</a>]
[<a href="https://app.xinhuanet.com/news/article.html?articleId=b0c7469d95e5d02dd0c99aad3aa57f95×tamp=21907">新华网</a>]
[<a href="https://www.stdaily.com/web/gdxw/2024-09/03/content_223871.html">科技日报</a>]
</td>
</tr>
<tr class="paper1">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://garmentlab.github.io"><img class="teaser_img"
src='images/paper/2024NeurIPS-GarmentLab-min.png' /></a></div>
</td>
<td class="pub_td2"><b>GarmentLab: A Unified Simulation and Benchmark for Garment Manipulation</b>
<br>Haoran Lu, Yitong Li, Ruihai Wu, Sijie Li, Ziyu Zhu, Chuanruo Ning, Yan Shen, Longzan Luo, Yuanpei Chen, Hao Dong<br>
<i>Neural Information Processing System (NeurIPS) 2024</i><br>
[Paper]
[<a href="https://garmentlab.github.io">Webpage</a>]
[<a href="https://github.com/GarmentLab">Code</a>]
[<a href="https://garmentlab.readthedocs.io/en/latest/">Docs</a>]
</td>
</tr>
<tr class="paper1">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://XXXX"><img class="teaser_img"
src='images/paper/2024NeurIPS-MO-DDN-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>MO-DDN: A Coarse-to-Fine Attribute-based Exploration Agent for Multi-Object Demand-driven Navigation</b>
<br>Hongcheng Wang, Peiqi Liu, Wenzhe Cai, Mingdong Wu, Zhengyu Qian, Hao Dong<br>
<i>Neural Information Processing System (NeurIPS) 2024</i><br>
[<a href="https://arxiv.org/pdf/2410.03488v1">Paper</a>]
[<a href="https://sites.google.com/view/moddn">Webpage</a>]
[<a href="https://github.com/whcpumpkin/MO-DDN">Code</a>]
</td>
</tr>
<tr class="paper2">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://arxiv.org/pdf/2406.17898"><img class="teaser_img"
src='images/paper/2024PRSChallenge-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>Human-centered In-building Embodied Delivery Benchmark</b>
<br>Zhuoquan Xu, Yang Liu, Xiaoqi Li, Jiyao Zhang, Hao Dong<br>
<i>arXiv 2024</i><br>
[<a href="https://arxiv.org/pdf/2406.17898">Paper</a>]
[<a href="https://prsorg.github.io/#introduction">Webpage</a>]
</td>
</tr>
<tr class="paper2">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://unidexfpm.github.io/unidexfpm.pdf"><img class="teaser_img"
src='images/paper/2024UniDexFPM-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>UniDexFPM: Universal Dexterous Functional Pre-grasp Manipulation via Diffusion Policy</b>
<br>Tianhao Wu, YunChong Gan, Mingdong Wu, Jingbo Cheng, Yaodong Yang, Yixin Zhu, Hao Dong<br>
<i>arXiv 2024</i><br>
[<a href="https://unidexfpm.github.io/unidexfpm.pdf">Paper</a>]
[<a href="https://unidexfpm.github.io">Webpage</a>]
</td>
</tr>
<tr class="paper2">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://arxiv.org/pdf/2406.07579"><img class="teaser_img"
src='images/paper/2024GFPack++-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>GFPack++: Improving 2D Irregular Packing by Learning Gradient Field with Attention</b>
<br>Tianyang Xue, Lin Lu, Yang Liu, Mingdong Wu, Hao Dong, Yanbin Zhang, Renmin Han, Baoquan Chen<br>
<i>arXiv 2024</i><br>
[<a href="https://arxiv.org/pdf/2406.07579">Paper</a>]
</td>
</tr>
<tr class="paper1">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://arxiv.org/pdf/2406.04882"><img class="teaser_img"
src='images/paper/2024InstructNav-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>InstructNav: Zero-shot System for Generic Instruction Navigation in Unexplored Environment</b>
<br><i><font color="#1367a7">--- The world's first general navigation large model that unifies visual-language navigation, object navigation as well as demand-driven navigation into one single framework.</font></i>
<br>Yuxing Long, Wenzhe Cai, Hongcheng Wang, Guanqi Zhan, Hao Dong<br>
<i>Conference on Robot Learning (CoRL) 2024</i><br>
[<a href="https://arxiv.org/pdf/2406.04882">Paper</a>]
[<a href="https://sites.google.com/view/instructnav">Webpage</a>]
[<a href="https://github.com/LYX0501/InstructNav">Code</a>]
[<a href="https://mp.weixin.qq.com/s/T145ZQlDBTWyX621mNVYGQ">量子位</a>]
</td>
</tr>
<tr class="paper1">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://arxiv.org/pdf/2406.11548"><img class="teaser_img"
src='images/paper/2024AIC-MLLM-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>AIC-MLLM: Autonomous Interactive Correction MLLM for Robust Robotic Manipulation</b>
<br><i><font color="#1367a7">--- The first automatic system for low-level end-effector action correction in manipulation tasks.</font></i>
<br>Chuyan Xiong, Chengyu Shen, Xiaoqi Li, Kaichen Zhou, Jiaming Liu, Ruiping Wang, Hao Dong<br>
<i>Conference on Robot Learning (CoRL) 2024</i><br>
[<a href="https://arxiv.org/pdf/2406.11548">Paper</a>]
[<a href="https://sites.google.com/view/aic-mllm">Webpage</a>]
</td>
</tr>
<tr class="paper2">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://arxiv.org/pdf/2406.07549"><img class="teaser_img"
src='images/paper/2024A3VLM-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>A3VLM: Actionable Articulation-Aware Vision Language Model</b>
<br>Siyuan Huang, Haonan Chang, Yuhan Liu, Yimeng Zhu, Hao Dong, Peng Gao, Abdeslam Boularias, Hongsheng Li<br>
<i>Conference on Robot Learning (CoRL) 2024</i><br>
[<a href="https://arxiv.org/pdf/2406.07549">Paper</a>]
[<a href="https://github.com/changhaonan/A3VLM">Code</a>]
[<a href="https://mp.weixin.qq.com/s/1pRcbz1rqQjq2GWqyUqg6Q">OpenGVLab摘要</a>]
</td>
</tr>
<tr class="paper1">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://arxiv.org/pdf/2403.08355"><img class="teaser_img"
src='images/paper/2024RAL-NaturalVLM-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>NaturalVLM: Leveraging Fine-grained Natural Language for Affordance-Guided Visual Manipulation</b>
<br>Ran Xu, Yan Shen, Xiaoqi Li, Ruihai Wu, Hao Dong<br>
<i>IEEE Robotics and Automation Letters (RAL) 2024</i><br>
[<a href="https://arxiv.org/pdf/2403.08355">Paper</a>]
[<a href="https://sites.google.com/view/naturalvlm">Webpage</a>]
</td>
</tr>
<tr class="paper2">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://arxiv.org/pdf/2403.02604.pdf"><img class="teaser_img"
src='images/paper/2024UniDoorManip-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>UniDoorManip: Learning Universal Door Manipulation Policy over Large-scale and Diverse Door Manipulation Environments</b>
<br>Yu Li*, Xiaojie Zhang*, Ruihai Wu*, Zilong Zhang, Yiran Geng, Hao Dong, Zhaofeng He<br>
<i>arXiv 2024</i><br>
[<a href="https://arxiv.org/pdf/2403.02604.pdf">Paper</a>]
[<a href="https://unidoormanip.github.io">Webpage</a>]
[<a href="https://mp.weixin.qq.com/s/MTyLXXAx6vv_dS8wUZfMVw">量子位</a>]
</td>
</tr>
<tr class="paper1">
<td class="pub_td1">
<div class="teaser_img_div"><a href="https://jiyao06.github.io/Omni6DPose/"><img class="teaser_img"
src='images/paper/2024ECCV-Omni6DPose-min.jpg' /></a></div>
</td>
<td class="pub_td2"><b>Omni6DPose: A Benchmark and Model for Universal 6D Object Pose Estimation and Tracking</b>
<br><i><font color="#1367a7">--- The largest-scale benchmark for universal 6D object pose estimation.</font></i>
<br>Jiyao Zhang, Weiyao Huang, Bo Peng, Mingdong Wu, Fei Hu, Zijian Chen, Bo Zhao, Hao Dong<br>
<i>European Conference on Computer Vision (ECCV) 2024</i><br>
[<a href="https://arxiv.org/abs/2406.04316">Paper</a>]
[<a href="https://jiyao06.github.io/Omni6DPose/">Webpage</a>]
[<a href="https://github.com/Omni6DPose">Code</a>]
[<a href="https://mp.weixin.qq.com/s/wasOMlVaSNgMWO-g1I2-Dg">计算机视觉工坊</a>]