<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta name="keywords"
content="hands,ECCV 2024,workshop,pose estimation,MANO,challenge,keypoint,gesture,robot,grasping,manipulation,hand tracking,motion capture">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="jemdoc, see http://jemdoc.jaboc.net/" />
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<link rel="stylesheet" href="main.css" type="text/css" />
<link rel="stylesheet" href="font-awesome/css/font-awesome.min.css">
<!--- <title></title> --->
<title>HANDS Workshop</title>
<!-- MathJax -->
<!-- End MathJax -->
</head>
<body>
<div id="main-container">
<div id="header-container">
<div id="header">
<div id="header-icon-text-container">
<div id="header-text-container">
<nav class="style1">
<ul id="outer_list">
<li id="outer_li_year"><a id="current_year" href="#">2024<span id="arrow"></span></a>
<ul id="top_list">
<!-- <li id="style2"><a id="style3" href="workshop2023.html">2023</a></li> -->
<li id="style2"><a id="style3" href="workshop2024.html">2024</a></li>
</ul>
</li>
<li id="outer_li"><a id="workshop_link" href="#">Workshop</a>
</li>
<li id="outer_li"><a id="challenge_link" href="#">Challenge</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div id="layout-content">
<div id="text-img-container">
<div id="img-container">
<a href="https://hands-workshop.org/"><img src="./logos/hands.png" alt="HANDS" width="100%" /></a>
</div>
<div id="text-container"></div>
</div>
<p>
<div id="beamer">
<beam>
Observing and Understanding <b>Hands</b> in Action
</beam></br>
<beams>
in conjunction with ECCV 2024</br>
</beams>
</div>
<br>
<div id="menu-container">
<div id="menu-item"><a id="style6" href="#overview">Overview</a></div>
<div id="menu-item"><a id="style6" href="#schedule">Schedule</a></div>
<div id="menu-item"><a id="style6" href="#papers">Papers</a></div>
<div id="menu-item"><a id="style6" href="#speakers">Speakers</a></div>
<div id="menu-item"><a id="style6" href="#organizers">Organizers</a></div>
<div id="menu-item"><a id="style6" href="#sponsors">Sponsors</a></div>
<div id="menu-item"><a id="style6" href="#contact">Contact</a></div>
</div>
<br>
</p>
<h1 id="overview">Overview </h1>
<font size="5">
Welcome to HANDS@ECCV24.
</font>
</br>
</br>
<p>Our HANDS workshop will gather vision researchers working on perceiving hands performing actions, including
2D & 3D hand detection, segmentation, pose/shape estimation, and tracking. We will also cover related
applications including gesture recognition, hand-object manipulation analysis, hand activity understanding,
and interactive interfaces.</p>
<p>The eighth edition of this workshop will emphasize the use of large foundation models (<i>e.g.</i> CLIP,
Point-E, Segment Anything, Latent Diffusion Models) for hand-related tasks. These models have revolutionized
AI perception and made groundbreaking contributions to multimodal understanding, zero-shot learning,
and transfer learning. However, their potential for hand-related tasks remains largely untapped.</p>
<h1 id="schedule">Schedule (Italy Time)</h1>
<p style="align-items: center;text-align: center;"><b>September 30th (2 pm-6 pm), 2024</b></br>
<b>Room Suite 8, MiCo Milano</b></br>
<b>Poster Boards Position: inside: 11, outside: 11</b></br>
<b>Online Zoom Link: </b><a href="https://nus-sg.zoom.us/j/9015323166?omn=82071666030">https://nus-sg.zoom.us/j/9015323166?omn=82071666030</a></br></p>
<table class="dataintable">
<tbody>
<tr>
<td><b>14:00 - 14:10</b></td>
<td>Opening Remarks</td>
</tr>
<tr>
<td><b>14:10 - 14:40</b></td>
<td> Invited Talk: Hanbyul Joo</td>
</tr>
<tr>
<td></td>
<td> <b>Title:</b> Towards Capturing Everyday Movements to Scale Up and Enrich Human Motion Data</br>
<b>Abstract:</b> In this talk, I will present our lab's efforts to scale and enrich 3D human motion data by capturing everyday human movements and natural human-object interactions. I will begin by describing our new multi-camera system, ParaHome, designed to capture human-object interactions in a natural home environment. Next, I will introduce MocapEve, a lightweight, cost-effective motion capture solution that uses two smartwatches and a head-mounted camera, enabling full-body 3D motion capture in diverse settings. Finally, I will discuss our recent works, CHORUS and ComA, which enable machines to model comprehensive affordances for 3D objects by leveraging pre-trained 2D diffusion models, allowing for unbounded object interactions.</td>
</tr>
<tr>
<td><b>14:40 - 15:10</b></td>
<td> Invited Talk: Shubham Tulsiani</td>
</tr>
<tr>
<td></td>
<td> <b>Title:</b> Understanding Human-object Interactions for Enabling Generalizable Robot Manipulation</br>
<b>Abstract:</b> We humans continually use our hands to interact with the world around us. From making our morning coffee to cleaning dishes after dinner, we effortlessly perform a plethora of tasks in our everyday lives. A central goal in robot learning is to develop similar generalist agents — ones capable of performing a diverse set of tasks across a wide range of environments. In this talk, I will highlight some of our recent efforts to build perception systems that better understand human interactions and allow robots to act in diverse scenarios. I will show how learning a (3D) generative model over human-object interactions can allow reconstructing interactions from in-the-wild clips, and how (2D) generative models over human interactions can guide robots acting in the real world.</td>
</tr>
<tr>
<td><b>15:10 - 16:10</b></td>
<td> Coffee Break & Poster Session </td>
</tr>
<tr>
<td> <b>16:10 - 16:40</b></td>
<td> Invited Talk: Qi Ye</td>
</tr>
<tr>
<td></td>
<td> <b>Title:</b> Understanding Hand-Object Interaction – From human hand reconstruction and generation to dexterous manipulation of robotic hands</br>
<b>Abstract:</b> In recent years, humanoid robots and embodied intelligence have attracted extensive attention. One of the most challenging aspects of endowing humanoid robots with intelligence is human-like dexterous manipulation with robotic hands. Unlike simple parallel grippers, human-like multi-fingered hands involve many degrees of freedom and complex variations in hand-object interaction, making it difficult for humanoid robots to acquire these manipulation skills. This talk will explore how to effectively use human manipulation experience to overcome these challenges and to develop and transfer human-like dexterous manipulation skills. It will cover our recent work on hand-object reconstruction, grasp generation and motion planning, multi-modal pretraining with human manipulation data for robotic hands, and more.</td>
</tr>
<tr>
<td> <b>16:40 - 17:10</b></td>
<td> Invited Talk: Shunsuke Saito</td>
</tr>
<tr>
<td></td>
<td> <b>Title:</b> Foundations for 3D Digital Hand Avatars</br>
<b>Abstract:</b> What constitutes the foundation for 3D digital hand avatars? In this presentation, we aim to establish the essential components necessary for creating high-fidelity digital hand models. We argue that relighting, animation/interaction, and in-the-wild generalization are crucial for bringing high-quality avatars to everyone. We will discuss several relightable appearance representations that achieve a photorealistic appearance under various lighting conditions. Furthermore, we will introduce techniques to effectively model animation and interaction priors, and demonstrate how to estimate complex hand-to-hand and hand-to-object interactions, even with data captured in uncontrolled environments. Finally, the talk will cover bridging the domain gap between high-quality studio data and large-scale in-the-wild data, which is key to enhancing robustness and diversity in avatar modeling algorithms. We will also explore how these foundations can complement and enhance each other.</td>
</tr>
<tr>
<td><b>17:10 - 17:25</b></td>
<td> Invited Talk: Prithviraj Banerjee </td>
</tr>
<tr>
<td></td>
<td> <b>Title:</b> HOT3D: A new benchmark dataset for vision-based understanding of 3D hand-object interactions</br>
<b>Abstract:</b> We introduce HOT3D, a publicly available dataset for egocentric hand and object tracking in 3D. The dataset offers over 833 minutes (more than 3.7M images) of multi-view RGB/monochrome image streams showing 19 subjects interacting with 33 diverse rigid objects, multi-modal signals such as eye gaze or scene point clouds, as well as comprehensive ground truth annotations including 3D poses of objects, hands, and cameras, and 3D models of hands and objects. In addition to simple pick-up/observe/put-down actions, HOT3D contains scenarios resembling typical actions in a kitchen, office, and living room environment. The dataset is recorded by two head-mounted devices from Meta: Project Aria, a research prototype of lightweight AR/AI glasses, and Quest 3, a production VR headset sold in millions of units. Ground-truth poses were obtained by a professional motion-capture system using small optical markers attached to hands and objects. Hand annotations are provided in the UmeTrack and MANO formats and objects are represented by 3D meshes with PBR materials obtained by an in-house scanner. We aim to accelerate research on egocentric hand-object interaction by making the HOT3D dataset publicly available and by co-organizing public challenges on the dataset. The dataset can be downloaded from the project website: facebookresearch.github.io</td>
</tr>
<tr>
<td> <b>17:25 - 17:53</b></td>
<td>
Competition Talks: Team JVhands <br>
<a style="visibility: hidden;"> Competition Talks:</a> Team HCB <br>
<a style="visibility: hidden;"> Competition Talks:</a> Team UVHANDS <br>
<a style="visibility: hidden;"> Competition Talks:</a> Team ACE
</td>
</tr>
<tr>
<td> <b>17:53 - 18:00</b></td>
<td> Closing Remarks</td>
</tr>
</tbody>
</table>
<h1 id="papers">Accepted Papers & Extended Abstracts</h1>
<!-- list papers in the form of:
OakInk2 : A Dataset for Long-Horizon Hand-Object Interaction and Complex Manipulation Task Completion.
Xinyu Zhan*, Lixin Yang*, Kangrui Mao, Hanlin Xu, Yifei Zhao, Zenan Lin, Kailin Li, Cewu Lu.
[pdf] -->
<p>We are delighted to announce that the following accepted papers and extended abstracts will appear in the workshop! Authors of full-length papers, extended abstracts, and invited posters should prepare a poster for presentation during the workshop.</p> <br>
<p> <b>Poster size: the posters should be portrait (vertical), with a maximum size of 90x180 cm.</b></p>
<!-- <ul> -->
<h2>Full-length Papers</h2>
<ul>
<li> <b>W01</b> AirLetters: An Open Video Dataset of Characters Drawn in the Air <br>
<i>Rishit Dagli, Guillaume Berger, Joanna Materzynska, Ingo Bax, Roland Memisevic</i> <br>
<!-- [pdf] -->
<a href="files/2024/airletters.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W02</b> RegionGrasp: A Novel Task for Contact Region Controllable Hand Grasp Generation <br>
<i>Yilin Wang, Chuan Guo, Li Cheng, Hai Jiang</i> <br>
<!-- [pdf] -->
<a href="files/2024/regiongrasp.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W03</b> Generative Hierarchical Temporal Transformer for Hand Pose and Action Modeling <br>
<i>Yilin Wen, Hao Pan, Takehiko Ohkawa, Lei Yang, Jia Pan, Yoichi Sato, Taku Komura, Wenping Wang</i> <br>
<!-- [pdf] -->
<a href="files/2024/GHTT_ECCVW_final (1).pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W04</b> Adaptive Multi-Modal Control of Digital Human Hand Synthesis using a Region-Aware Cycle Loss <br>
<i>Qifan Fu, Xiaohang Yang, Muhammad Asad, Changjae Oh, Shanxin Yuan, Gregory Slabaugh</i> <br>
<!-- [pdf] -->
<a href="files/2024/adaptive.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W05</b> Conditional Hand Image Generation using Latent Space Supervision in Random Variable Variational Autoencoders <br>
<i>Vassilis Nicodemou, Iason Oikonomidis, Giorgos Karvounas, Antonis Argyros</i> <br>
<!-- [pdf] -->
<a href="files/2024/ECCVW_Hands_2024_CG_SRV_VAE-4.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W06</b> ChildPlay-Hand: A Dataset of Hand Manipulations in the Wild <br>
<i>Arya Farkhondeh*, Samy Tafasca*, Jean-Marc Odobez</i> <br>
<!-- [pdf] -->
<a href="files/2024/childplay-hand.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W07</b> EMAG: Ego-motion Aware and Generalizable 2D Hand Forecasting from Egocentric Videos <br>
<i>Masashi Hatano, Ryo Hachiuma, Hideo Saito</i> <br>
<!-- [pdf] -->
<a href="files/2024/emag.pdf">[pdf]</a>
</li>
</ul>
<!-- </ul> -->
<h2>Extended Abstracts</h2>
<ul>
<li> <b>W08</b> AFF-ttention! Affordances and Attention models for Short-Term Object Interaction Anticipation <br>
<i>Lorenzo Mur-Labadia, Ruben Martinez-Cantin, Jose J Guerrero, Giovanni Maria Farinella, Antonino Furnari</i> <br>
<!-- [pdf] -->
<a href="files/2024/affordances.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W09</b> Diffusion-based Interacting Hand Pose Transfer <br>
<i>Junho Park*,
Yeieun Hwang*,
Suk-Ju Kang#</i> <br>
<!-- [pdf] -->
<a href="files/2024/IHPT__ECCVW_2024_.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W10</b> Are Synthetic Data Useful for Egocentric Hand-Object Interaction Detection? <br>
<i>Rosario Leonardi,
Antonino Furnari,
Francesco Ragusa,
Giovanni Maria Farinella</i> <br>
<!-- [pdf] -->
<a href="files/2024/[ABSTRACT] Are_Synthetic_Data_Useful_for_Egocentric_Hand_Object_Interaction_Detection.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W11</b> Parameterized Quasi-Physical Simulators for Dexterous Manipulations Transfer <br>
<i>Xueyi Liu,
Kangbo Lyu,
Jieqiong Zhang,
Tao Du,
Li Yi</i> <br>
<!-- [pdf] -->
<a href="files/2024/QuasiSim_short_abstract_2.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W12</b> Pre-Training for 3D Hand Pose Estimation with Contrastive Learning on Large-Scale Hand Images in the Wild <br>
<i>Nie Lin*,
Takehiko Ohkawa*,
Mingfang Zhang,
Yifei Huang,
Ryosuke Furuta,
Yoichi Sato</i> <br>
<!-- add pdf from files/2024 -->
<a href="files/2024/Nie_Lin_HANDS@Workshop_ECCV24_HandCLR_Camera-ready_Submission.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W13</b> Task-Oriented Human Grasp Synthesis via Context- and Task-Aware Diffusers <br>
<i>An-Lun Liu, Yu-Wei Chao, Yi-Ting Chen</i> <br>
<!-- [pdf] -->
<a href="files/2024/ECCV24 Extended Abstracts Submission - Task-Oriented Human Grasp Synthesis via Context- and Task-Aware Diffusers.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W14</b> Action Scene Graphs for Long-Form Understanding of Egocentric Videos <br>
<i>Ivan Rodin*, Antonino Furnari*, Kyle Min*, Subarna Tripathi, Giovanni Maria Farinella</i> <br>
<!-- [pdf] -->
<a href="files/2024/EASG___HANDS.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W15</b> Get a Grip: Reconstructing Hand-Object Stable Grasps in Egocentric Videos <br>
<i>Zhifan Zhu, Dima Damen</i> <br>
<!-- [pdf] -->
<a href="files/2024/HANDS_2024_Workshop__Get_a_Grip.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W16</b> Self-Supervised Learning of Deviation in Latent Representation for Co-speech Gesture Video Generation <br>
<i>Huan Yang, Jiahui Chen, Chaofan Ding, Runhua Shi, Siyu Xiong, Qingqi Hong, Xiaoqi Mo, Xinhan Di</i> <br>
<!-- [pdf] -->
<a href="files/2024/ssl.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W17</b> OCC-MLLM-Alpha: Empowering Multi-modal Large Language Model for the Understanding of Occluded Objects with Self-Supervised Test-Time Learning <br>
<i>Shuxin Yang, Xinhan Di</i> <br>
<!-- [pdf] -->
<a href="files/2024/OCC_MLLM_Alpha_Empowering_Multimodal_Large_Language_Model_For_the_Understanding_of_Occluded_Objects.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W18</b> Dyn-HaMR: Recovering 4D Interacting Hand Motion from a Dynamic Camera <br>
<i>Zhengdi Yu, Alara Dirik, Stefanos Zafeiriou, Tolga Birdal</i> <br>
<!-- [pdf] -->
<a href="files/2024/Dyn_HaMR_ECCVW_2024_extended_abstract.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W19</b> Learning Dexterous Object Manipulation with a Robotic Hand via
Goal-Conditioned Visual Reinforcement Learning Using Limited Demonstrations <br>
<i>Samyeul Noh, Hyun Myung</i> <br>
<!-- [pdf] -->
<a href="files/2024/learning.pdf">[pdf]</a>
</li>
</ul>
<h2>Invited Posters</h2>
<ul>
<li> <b>W20</b> AttentionHand: Text-driven Controllable Hand Image Generation for 3D Hand Reconstruction in the Wild <br>
<i>Junho Park*, Kyeongbo Kong*, Suk-Ju Kang#</i> <br>
<!-- [pdf] -->
<a href="files/2024/attentionhand.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W21</b> HandDAGT: A Denoising Adaptive Graph Transformer for 3D Hand Pose Estimation <br>
<i>Wencan Cheng, Eunji Kim, Jong Hwan Ko</i> <br>
<!-- [pdf] -->
<a href="files/2024/ECCV24_poster_HandDAGT.pdf">[poster]</a>
</li>
</ul>
<ul>
<li> <b>W22</b> On the Utility of 3D Hand Poses for Action Recognition <br>
<i>Md Salman Shamil, Dibyadip Chatterjee, Fadime Sener, Shugao Ma, Angela Yao</i> <br>
<!-- [pdf] -->
<a href="https://arxiv.org/pdf/2403.09805">[pdf]</a>
</li>
</ul>
<ul>
<li> <b>W23</b> ActionVOS: Actions as Prompts for Video Object Segmentation <br>
<i>Liangyang Ouyang, Ruicong Liu, Yifei Huang, Ryosuke Furuta, Yoichi Sato</i> <br>
<!-- [pdf] -->
<a href="files/2024/ActionVOS.pdf">[poster]</a>
</li>
</ul>
<ul>
<li> <b>W24</b> GraspXL: Generating Grasping Motions for Diverse Objects at Scale <br>
<i>Hui Zhang, Sammy Christen, Zicong Fan, Otmar Hilliges, Jie Song</i> <br>
<!-- [pdf] -->
<a href="files/2024/GraspXL.pdf">[poster]</a>
</li>
</ul>
<h2>Technical Reports</h2>
<ul>
<li> 3DGS-based Bimanual Category-agnostic Interaction Reconstruction <br>
<i>Jeongwan On, Kyeonghwan Gwak, Gunyoung Kang, Hyein Hwang, Soohyun Hwang, Junuk Cha, Jaewook Han, Seungryul Baek</i> <br>
<!-- [pdf] -->
<a href="files/2024/UVHANDS.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> 2nd Place Solution Technical Report for Hands’24 ARCTIC Challenge from Team ACE <br>
<i>Congsheng Xu*, Yitian Liu*, Yi Cui, Jinfan Liu, Yichao Yan, Weiming Zhao, Yunhui Liu, Xingdong Sheng</i> <br>
<!-- [pdf] -->
<a href="files/2024/ACE.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li> Solution of Multiview Egocentric Hand Tracking Challenge ECCV2024 <br>
<i>Minqiang Zou, Zhi Lv, Riqiang Jin, Tian Zhan, Mochen Yu, Yao Tang, Jiajun Liang#</i> <br>
<!-- [pdf] -->
<a href="files/2024/JVHANDS.pdf">[pdf]</a>
</li>
</ul>
<ul>
<li>Technical report of HCB team for Multiview Egocentric Hand Tracking Challenge on HANDS 2024 Challenge <br>
<i>Haohong Kuang, Yang Xiao#, Changlong Jiang, Jinghong Zheng, Hang Xu, Ran Wang, Zhiguo Cao, Min Du, Zhiwen Fang, Joey Tianyi Zhou</i> <br>
<!-- [pdf] -->
<a href="files/2024/HCB.pdf">[pdf]</a>
</li>
</ul>
<!-- <h2>Important Dates (Deadline has been extended)</h2>
<table class="dataintable">
<tbody>
<tr>
<td><b>June 15, 2024 (Opened)</b></td>
<td>Paper Submission Start</td>
</tr>
<tr>
<td><del>July 25</del> <b>August 10, 2024 11:59 (Pacific Time)</b></td>
<td>Full Length Paper and its Supplementary Materials Submission Deadline</td>
</tr>
<tr>
<td><b>August 15, 2024, 11:59 (Pacific Time)</b></td>
<td>CMT for Extended Abstracts & Posters Submission is Closed, Further Submission can be submitted via Email ([email protected])</td>
</tr>
<tr>
<td><del>August 10</del> <b>August 20, 2024, 11:59 (Pacific Time)</b></td>
<td>Full Length Paper Author Notification, Extended Abstracts & Posters Submission Deadline</td>
</tr>
<tr>
<td><del>August 20</del> <b>August 25, 2024, 11:59 (Pacific Time)</b></td>
<td>Camera-ready Submission</td>
</tr>
</tbody>
</table> -->
<!--
<h2>Topics</h2>
We will cover all hand-related topics. The relevant topics include and not limited to:
<ul id="topicstyle1">
<li id="topicstyle2">Hand pose and shape estimation</li>
<li id="topicstyle2">Hand & object interactions</li>
<li id="topicstyle2">Hand detection/segmentation</li>
<li id="topicstyle2">Hand gesture/action recognition</li>
<li id="topicstyle2">4D hand tracking and motion capture</li>
<li id="topicstyle2">Hand motion synthesis</li>
<li id="topicstyle2">Hand modeling, rendering, generation</li>
<li id="topicstyle2">Camera systems and annotation tools</li>
<li id="topicstyle2">Novel algorithms and network architectures</li>
<li id="topicstyle2">Multi-modal learning</li>
<li id="topicstyle2">Self-/un-/weakly-supervised learning</li>
<li id="topicstyle2">Generalization and adaptation</li>
<li id="topicstyle2">Egocentric vision for AR/VR</li>
<li id="topicstyle2">Robot grasping, object manipulation, Haptics</li>
</ul>
-->
<!-- <h2>Submission Guidelines</h2>
<b>Submission Website:</b> <a
href="https://cmt3.research.microsoft.com/HANDS2024">https://cmt3.research.microsoft.com/HANDS2024</a>
</br>
</br>
<p>We accept <b>full length papers</b>, <b>extended abstracts</b> and <b>posters</b> in our workshop. The full
length submissions should be anonymized and will be one-round peer reviewed. The accepted full length papers
will be included in the ECCV24 conference proceedings, while others will only be presented in the workshop. We
welcome papers that are accepted to the ECCV2024 main conference or previous conferences, and extended
abstracts that in progress, to show the posters in our workshop.</p>
<h3>Call for full length papers</h3>
<p>Full length submissions should follow <a
href="https://eccv2024.ecva.net/Conferences/2024/AuthorGuide">ECCV2024 submission policies</a> (no more than
14 pages) and use the official ECCV 2024 template. Note that the submissions violate the double-blind policy
or the dual-submission policy will be rejected without review.</p>
<h3>Call for extended abstracts and posters</h3>
<p>Extended abstracts are not subject to the ECCV rules, and can use other templates. They should be shorter
than the equivalent of 4 pages in CVPR template format. Note that extended abstracts are not anonymized, and
will not be peer reviewed. Accepted extended abstracts will only publish on our website, show in our workshop
and not appear in the ECCV24 conference proceedings. Posters can be simply submitted with PDF and their brief
information like paper title, authors and conference name. </p>
<p><b>The CMT system will close after August 15. After that, extended abstracts and posters can still be submitted via emails ([email protected]) until August 20.</b></p> -->
<h1 id="speakers">Invited Speakers</h1>
<div id="member-container">
<div id="member">
<img src="./profiles/2024/han.jpg">
<p>
<b><a href="https://jhugestar.github.io">Hanbyul Joo</a></b></br>
Seoul National University
</p>
</div>
<div id="member">
<img src="./profiles/2024/prithviraj.png">
<p>
<b><a href="https://scholar.google.com/citations?user=ZmDUIeMAAAAJ&hl=en">Prithviraj Banerjee</a></b></br>
Meta
</p>
</div>
<div id="member">
<img src="./profiles/2024/yeqi.jpg">
<p>
<b><a href="https://person.zju.edu.cn/en/yeqi">Qi Ye</a></b></br>
Zhejiang University
</p>
</div>
<div id="member">
<img src="./profiles/2024/shunsuke.jpg">
<p>
<b><a href="https://shunsukesaito.github.io">Shunsuke Saito</a></b></br>
Reality Labs Research
</p>
</div>
<div id="member">
<img src="./profiles/2024/shubham.jpg">
<p>
<b><a href="https://shubhtuls.github.io">Shubham Tulsiani</a></b></br>
Carnegie Mellon University
</p>
</div>
</div>
<h1 id="organizers">Organizers</h1>
<p>
<div id="member-container">
<div id="member">
<img src="./profiles/2024/hyung.jpg">
<p>
<b><a href="https://hyungjinchang.wordpress.com">Hyung Jin Chang</a></b></br>
University of Birmingham
</p>
</div>
<div id="member">
<img src="./profiles/2024/rongyu.jpg">
<p>
<b><a href="https://gloryyrolg.github.io">Rongyu Chen</a></b></br>
National University of Singapore
</p>
</div>
<div id="member">
<img src="./profiles/2024/zicong.png">
<p>
<b><a href="https://zc-alexfan.github.io">Zicong Fan</a></b></br>
ETH Zurich
</p>
</div>
<div id="member">
<img src="./profiles/2024/otmar.png">
<p>
<b><a href="https://ait.ethz.ch/people/hilliges">Otmar Hilliges</a></b></br>
ETH Zurich
</p>
</div>
<div id="member">
<img src="./profiles/2024/kun.png">
<p>
<b><a href="https://kunhe.github.io">Kun He</a></b></br>
Meta Reality Labs
</p>
</div>
<div id="member">
<img src="./profiles/2024/take.png">
<p>
<b><a href="https://tkhkaeio.github.io/">Take Ohkawa</a></b></br>
University of Tokyo
</p>
</div>
<div id="member">
<img src="./profiles/2024/yoichi.png">
<p>
<b><a href="https://sites.google.com/ut-vision.org/ysato/home">Yoichi Sato</a></b></br>
University of Tokyo
</p>
</div>
<div id="member">
<img src="./profiles/2024/elden.jpg">
<p>
<b><a href="https://eldentse.github.io">Elden Tse</a></b></br>
National University of Singapore
</p>
</div>
<div id="member">
<img src="./profiles/2024/linlin.png">
<p>
<b><a href="https://mu4yang.com">Linlin Yang</a></b></br>
Communication University of China
</p>
</div>
<div id="member">
<img src="./profiles/2024/lixin.png">
<p>
<b><a href="https://lixiny.github.io">Lixin Yang</a></b></br>
Shanghai Jiao Tong University
</p>
</div>
<div id="member">
<img src="./profiles/2024/angela.png">
<p>
<b><a href="https://www.comp.nus.edu.sg/cs/people/ayao/">Angela Yao</a></b></br>
National University of Singapore
</p>
</div>
<div id="member">
<img src="./profiles/2024/linguang.png">
<p>
<b><a href="https://lg-zhang.github.io/">Linguang Zhang</a></b></br>
Facebook Reality Labs (Oculus)
</p>
</div>
</div>
</p>
<h1 id="sponsors">Technical Program Committee</h1>
<p>Thank you so much to the Technical Program Committee for their thoughtful reviews.</p>
<ul id="topicstyle1">
<li id="topicstyle2">Chenyangguang Zhang (Tsinghua University)</li>
<li id="topicstyle2">Gyeongsik Moon (Meta)</li>
<li id="topicstyle2">Jiayin Zhu (NUS)</li>
<li id="topicstyle2">Jihyun Lee (KAIST)</li>
<li id="topicstyle2">Junuk Cha (UNIST)</li>
<li id="topicstyle2">Kailin Li (Shanghai Jiao Tong University)</li>
<li id="topicstyle2">Keyang Zhou (University of Tübingen)</li>
<li id="topicstyle2">Pengzhan Sun (NUS)</li>
<li id="topicstyle2">Rolandos Alexandros Potamias (Imperial College London)</li>
<li id="topicstyle2">Seungryul Baek (UNIST)</li>
<li id="topicstyle2">Takuma Yagi (AIST)</li>
<li id="topicstyle2">Zerui Chen (Inria Paris)</li>
<li id="topicstyle2">Zhiying Leng (Beihang University)</li>
<li id="topicstyle2">Zhongqun Zhang (University of Birmingham)</li>
</ul>
<h1 id="sponsors">Sponsors</h1>
<div class="sponsors-container">
<img class="sponsor-img" src="./profiles/2024/sponsor1.png">
<img class="sponsor-img" src="./profiles/2024/sponsor2_1.svg">
<img class="sponsor-img" src="./profiles/2024/sponsor2_2.png">
</div>
<h1 id="contact">Contact</h1>
<p>[email protected]</p>
<div id="footer">
<p style="align-items: center;text-align: center;">
<a href="https://youtube.com/@handsworkshop" target="_Blank">
<img id="page1" alt="" src="profiles/youtube.jpg">
</a>
<a href="https://github.com/handsworkshop" target="_Blank">
<img id="page" alt="" src="profiles/github.png">
</a>
</p>
</div>
<script>
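// Year-aware navigation: each handler below reads the currently selected year from the
// dropdown and routes to the matching workshopYYYY.html or challengeYYYY.html page.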
document.getElementById('outer_li_year').addEventListener('click', function (event) {
event.preventDefault(); // prevent the default link behavior
// read the year shown in the first <li>
var year = document.querySelector('#outer_list > li:first-child > a').textContent.trim();
// build the new href
var newHref = 'workshop' + year + '.html';
// navigate to the new page
window.location.href = newHref;
});
document.getElementById('workshop_link').addEventListener('click', function (event) {
event.preventDefault(); // prevent the default link behavior
// read the year shown in the first <li>
var year = document.querySelector('#outer_list > li:first-child > a').textContent.trim();
// build the new href
var newHref = 'workshop' + year + '.html';
// navigate to the new page
window.location.href = newHref;
});
document.getElementById('challenge_link').addEventListener('click', function (event) {
event.preventDefault(); // prevent the default link behavior
// read the year shown in the first <li>
var year = document.querySelector('#outer_list > li:first-child > a').textContent.trim();
// build the new href
var newHref = 'challenge' + year + '.html';
// navigate to the new page
window.location.href = newHref;
});
// get all <a> tags with id="style3"
var yearLinks = document.querySelectorAll('#style3');
yearLinks.forEach(function (link) {
link.addEventListener('click', function (event) {
event.preventDefault(); // prevent the default link behavior
// read the year that was clicked
var selectedYear = this.textContent.trim();
// update the year displayed in the first li
document.getElementById('current_year').textContent = selectedYear;
// close the dropdown menu
// document.getElementById('top_list').style.display = 'none';
// optional: add navigation logic here if needed
// window.location.href = this.href;
});
});
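// This is the workshop page, so highlight the "Workshop" tab in the navigation bar.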
var workshopLi = document.querySelector('#workshop_link');
workshopLi.classList.add('highlight');
// function highlightLinks() {
// var currentHref = window.location.href;
// alert("执行")
// alert(currentHref)
// if (currentHref.includes('workshop')) {
// var workshopLi = document.querySelector('#workshop_link');
// workshopLi.classList.add('highlight');
// } else if (currentHref.includes('challenge')) {
// var workshopLi1 = document.querySelector('#challenge_link');
// workshopLi1.classList.add('highlight');
// }
// }
// window.addEventListener('popstate', highlightLinks);
</script>
</body>
</html>