<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta name="keywords"
content="hands,ECCV 2024,workshop,AssemblyHands,ARCTIC,OakInk2,UmeTrack,pose estimation,MANO,challenge,keypoint,gesture,robot,grasping,manipulation,hand tracking,motion capture">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="generator" content="jemdoc, see http://jemdoc.jaboc.net/" />
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<link rel="stylesheet" href="main.css" type="text/css" />
<link rel="stylesheet" href="font-awesome/css/font-awesome.min.css">
<!--- <title></title> --->
<title>HANDS Workshop</title>
<!-- MathJax -->
<!-- End MathJax -->
</head>
<body>
<div id="main-container">
<div id="header-container">
<div id="header">
<div id="header-icon-text-container">
<div id="header-text-container">
<nav class="style1">
<ul id="outer_list">
<li id="outer_li_year"><a id="current_year" href="#">2024<span id="arrow"></span></a>
<ul id="top_list">
<!-- <li id="style2"><a id="style3" href="workshop2023.html">2023</a></li> -->
<li id="style2"><a id="style3" href="workshop2024.html">2024</a></li>
</ul>
</li>
<li id="outer_li"><a id="workshop_link" href="#">Workshop</a>
</li>
<li id="outer_li"><a id="challenge_link" href="#">Challenge</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div id="layout-content">
<div id="text-img-container">
<div id="img-container">
<a href="https://hands-workshop.org/"><img src="./logos/hands.png" alt="HANDS" width="100%" /></a>
</div>
<div id="text-container"></div>
</div>
<p>
<div id="beamer">
<beam>
Observing and Understanding <b>Hands</b> in Action
</beam><br />
<beams>
in conjunction with ECCV 2024<br />
</beams>
</div>
<br>
<div id="menu-container">
<div id="menu-item"><a id="style6" href="#overview">Overview</a></div>
<div id="menu-item"><a id="style6" href="#challenge1">AssemblyHands</a></div>
<div id="menu-item"><a id="style6" href="#challenge2">ARCTIC</a></div>
<div id="menu-item"><a id="style6" href="#challenge3">OakInk2</a></div>
<div id="menu-item"><a id="style6" href="#challenge4">MegoTrack</a></div>
<div id="menu-item"><a id="style6" href="#contact">Contact</a></div>
</div>
<br>
</p>
<h1 id="overview">Overview</h1>
<p>We present the HANDS24 challenge, comprising four tracks based on AssemblyHands, ARCTIC, OakInk2, and UmeTrack. To
participate in the challenge, please fill out the <a href="https://forms.gle/cLnjFY2YFKBrw6kN7"><b>Google
Form</b></a> and accept the terms and conditions.</p>
<h4 align="center">Winners and prizes will be announced and awarded during the workshop.</h4>
<h4 align="center">Please see <b>General Rules and Participation</b> and <b>Four Tracks</b> below for more
details.</h4>
<h2>Timeline</h2>
<table class="dataintable">
<tbody>
<tr>
<td><b>July 15 2024 (Opened)</b></td>
<td>Challenge data & website release & registration open</td>
</tr>
<tr>
<td><b>around August 5 2024 (Please refer to each track page for details)</b></td>
<td>Challenges start</td>
</tr>
<tr>
<td><b>September 8 2024</b></td>
<td>Registration closes</td>
</tr>
<tr>
<td><b>around September 15 2024 (Please refer to each track page for the specific deadline)</b></td>
<td>Challenge submission deadline</td>
</tr>
<tr>
<td><b>September 20 2024</b></td>
<td>Decisions to participants</td>
</tr>
<tr>
<td><b>September 25 2024</b></td>
<td>Technical Report Deadline</td>
</tr>
</tbody>
</table>
<h2>General Rules and Participation</h2>
<p>All participants must follow the general challenge rules below; additional rules can be found on each official track page.
</p>
<ul>
<li>
<p>To participate in the challenge, please fill out the <a
href="https://forms.gle/cLnjFY2YFKBrw6kN7"><b>Google Form</b></a> and accept the terms and
conditions.</p>
<ul>
<li>
<p>Please <b>DO</b> use your institution's email address. Please <b>DO NOT</b> use a personal
email address such as gmail.com, qq.com, or 163.com.</p>
</li>
<li>
<p>Each team must register under only one user id/email id. The list of team members <b>cannot be
changed or rearranged</b> throughout the competition.</p>
</li>
<li>
<p>Each team should use the same email address to create accounts on the evaluation servers, and use
the team name (verbatim) on those servers.</p>
</li>
<li>
<p>Each individual can only participate in one team and should provide the institution email at
registration.</p>
</li>
<li>
<p>The team name should be formal; we reserve the right to change a team name after
discussing it with the team.</p>
</li>
<li>
<p>Each team will receive a registration email after registration.</p>
</li>
<li>
<p>Teams found to be registered under multiple IDs will be disqualified.</p>
</li>
<li>
<p>For any special cases, please email the organizers.</p>
</li>
</ul>
</li>
<li>
<p>The primary contact email is important during registration.</p>
<ul>
<li>
<p>We will contact participants via email whenever there is an important update.</p>
</li>
<li>
<p>If there are any special circumstances or ambiguities that may lead to disputes, please email the
organizers first for clarification or approval; raising such issues only after the fact may result in
disqualification.</p>
</li>
</ul>
</li>
<li>
<p>To encourage fair competition, individual tracks may impose limits on, e.g., overall model size or
allowed training datasets. Details can be found on each track page.</p>
<ul>
<li>
<p>Teams may use any publicly available and appropriately licensed data (if allowed by the track) to
train their models in addition to the data provided by the organizers.</p>
</li>
<li>
<p>The number of daily and overall submissions may be limited, depending on the track.</p>
</li>
<li>
<p>The best performance of a team <b>CANNOT</b> be hidden during the competition. Hiding the best
performance may result in a warning or even disqualification.</p>
</li>
<li>
<p>Any supervised/unsupervised training on the validation/testing set is not allowed in this
competition.</p>
</li>
</ul>
</li>
<li>
<p>Reproducibility is the responsibility of the winning teams, and we invite all teams to publicize their
methods.</p>
<ul>
<li>
<p>Winning methods should provide their source code to reproduce their results under strict
confidentiality rules if requested by organizers/other participants. If the organizing committee
determines that the submitted code runs with errors or does not yield results comparable to those
in the final leaderboard and the team is not willing to cooperate, it will be disqualified, and
the winning place will go to the next team in the leaderboard. </p>
</li>
<li>
<p>In order for participants to be eligible for competition prizes and be included in the official
rankings (to be presented during the workshop and in subsequent publications), information about
their submission must be provided to the organizers. This may include, but is not limited to,
details on their method, synthetic and real data use, architecture, and training details.</p>
</li>
<li>
<p>For each submission, participants must keep the parameters of their method constant across all
testing data for a given track.</p>
</li>
<li>
<p>To be considered a valid candidate in the competition, a method has to beat the baseline by a
non-trivial margin. A method is invalid if it contains no significant technical changes; for example,
simply replacing a ResNet18 backbone with a ResNet101 backbone does not count as a valid
method. The organizers reserve all rights to determine the validity of a method. We will invite
all valid teams to present their methods via a 2-3 page technical report and/or poster
presentation.</p>
</li>
<li>
<p>Winners should provide a 2-3 page technical report, a winner talk, and a poster presentation during
the workshop.</p>
</li>
</ul>
</li>
</ul>
<h1 id="challenge1">AssemblyHands-S2D (2nd edition) </h1>
<p>
<a href="https://assemblyhands.github.io/">AssemblyHands</a> is a large-scale benchmark with accurate 3D hand pose annotations to facilitate the study of egocentric activities with challenging hand-object interactions. The 1st edition of this challenge at ICCV'23 featured egocentric 3D hand pose estimation from a single-view image and made significant progress on the single-view estimation.
</p>
<p align="center" style="margin: 2; padding: 2;">
<img src="assets/assemblyhands/s2dhand_hlong.png" alt="Image" width="80%" style="display: block; margin: 0 auto;"/>
</p>
<p>
At ECCV’24, we offer an extended AssemblyHands challenge for multi-view settings, dubbed single-view-to-dual-view adaptation of 3D hand pose estimation (<a href="https://github.com/ut-vision/S2DHand">S2DHand</a>). Given a pre-trained single-view estimator, participants are asked to develop unsupervised adaptation algorithms for arbitrarily placed dual-view images, assuming that camera parameters and keypoint ground truth for these views are not given. This adaptation bridges the gap between camera settings more flexibly and allows testing various camera configurations for better multi-view hand pose estimation. The details of the challenge rules, data, and evaluation are as follows.
</p>
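<p>As a rough illustration only (this is <b>not</b> the official S2DHand baseline), one generic way to exploit dual-view agreement without extrinsics is a self-supervised consistency objective between the two views' wrist-relative predictions after a rigid (Kabsch) alignment. The sketch below assumes PyTorch and 21-joint predictions of shape (batch, 21, 3):</p>
<pre style="background-color: #f5f5f5; border: 1px solid #ccc; padding: 10px; text-align: left; overflow-x: auto;"><code>import torch

def cross_view_consistency_loss(joints_a, joints_b):
    # joints_a, joints_b: (B, 21, 3) predictions from the two cameras.
    # Make both predictions wrist-relative (joint 0 is assumed to be the wrist).
    a = joints_a - joints_a[:, :1]
    b = joints_b - joints_b[:, :1]
    # Kabsch: per-sample rotation that best aligns a to b, no extrinsics needed.
    h = torch.einsum("bki,bkj->bij", a, b)                    # (B, 3, 3) covariance
    u, _, vt = torch.linalg.svd(h)
    r = torch.matmul(vt.transpose(1, 2), u.transpose(1, 2))   # R = V U^T
    # Note: the reflection correction (det(R) = -1 case) is omitted for brevity.
    a_aligned = torch.einsum("bij,bkj->bki", r, a)
    return torch.norm(a_aligned - b, dim=-1).mean()</code></pre>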
<h3>Rules</h3>
<p>
<ul>
<li>Only use the provided pre-trained weights for adaptation. In order to align the backbone capacity across submissions, <b>DO NOT</b> increase the backbone model size or use other backbone networks.</li>
<li><b>DO NOT</b> use keypoint ground truth of the validation and test sets from AssemblyHands in any way, e.g., for re-training.</li>
<li><b>DO NOT</b> use annotated camera extrinsics from AssemblyHands in any way, including selecting training samples with extrinsics.</li>
<li><b>DO NOT</b> use the validation set for training or fine-tuning.</li>
<li><b>DO NOT</b> use any other data and annotations from Assembly101.</li>
<li>Participants are only allowed to use two-view images during adaptation.</li>
<li>We reserve the right to determine if a method is valid and eligible for awards.</li>
</ul>
</p>
<p><b>Metric: </b> We use wrist-relative MPJPE for each dual-camera pair.</p>
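<p>For reference, a minimal NumPy sketch of wrist-relative MPJPE, assuming 21-joint predictions with the wrist at index 0 and ground truth in the same units (the official evaluation is implemented in the submission code linked below):</p>
<pre style="background-color: #f5f5f5; border: 1px solid #ccc; padding: 10px; text-align: left; overflow-x: auto;"><code>import numpy as np

def wrist_relative_mpjpe(pred, gt, wrist_idx=0):
    # pred, gt: arrays of shape (N, 21, 3), e.g., in millimetres.
    pred_rel = pred - pred[:, wrist_idx:wrist_idx + 1, :]   # subtract the wrist joint
    gt_rel = gt - gt[:, wrist_idx:wrist_idx + 1, :]
    per_joint_err = np.linalg.norm(pred_rel - gt_rel, axis=-1)   # (N, 21)
    return per_joint_err.mean()</code></pre>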
<p><b>Submission: </b>Please follow the submission instructions at <a href="https://github.com/ut-vision/ECCV2024-AssemblyHands-S2D">[ECCV2024-AssemblyHands-S2D]</a>, where you can find test images, metadata, and an example submission file. Please compress your prediction JSON into a zip file and upload it to the submission server: <a href="https://codalab.lisn.upsaclay.fr/competitions/19885">[CodaLab]</a>. Our baseline result after adaptation is listed under the name "LRC".</p>
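<p>A rough sketch of the packaging step only (the file names and JSON layout below are placeholders; follow the ECCV2024-AssemblyHands-S2D repository for the exact schema):</p>
<pre style="background-color: #f5f5f5; border: 1px solid #ccc; padding: 10px; text-align: left; overflow-x: auto;"><code>import json
import zipfile

# Placeholder structure: 21 3D keypoints per test image id (see the repo for the real schema).
predictions = {"example_image_id": [[0.0, 0.0, 0.0] for _ in range(21)]}

with open("pred.json", "w") as f:
    json.dump(predictions, f)

# The server expects a zip archive containing the prediction JSON.
with zipfile.ZipFile("submission.zip", "w", zipfile.ZIP_DEFLATED) as zf:
    zf.write("pred.json")</code></pre>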
<p><b>Update Notes: </b><br>
(Aug 2): We updated the eval server and submission code.<br>
(July 4): We are constructing the dataset and evaluation server tailored for this challenge. Check the S2DHand project for a reference on this task.
</p>
<p>
If you have any questions regarding the task, rules, or technical issues, please contact us via the HANDS group email.<br />
Challenge committee: Ruicong Liu, Takehiko Ohkawa, and Kun He.
</p>
<div style= "background-color: #e9e9e9; border: 1px solid black; text-align: center;">
<h4 align="center">Check out the following links to get started</h4>
<!-- <h4 align="center">S2DHand project page: <a href="https://github.com/ut-vision/S2DHand"><b>https://github.com/ut-vision/S2DHand</b></a></h4> -->
<h4 align="center">Baseline method (S2DHand): <a href="https://github.com/ut-vision/S2DHand_HANDS2024"><b>https://github.com/ut-vision/S2DHand_HANDS2024</b></a></h4>
<h4 align="center">Submission code: <a href="https://github.com/ut-vision/ECCV2024-AssemblyHands-S2D"><b>https://github.com/ut-vision/ECCV2024-AssemblyHands-S2D</b></a></h4>
<h4 align="center">Leaderboard: <a href="https://codalab.lisn.upsaclay.fr/competitions/19885"><b>https://codalab.lisn.upsaclay.fr/competitions/19885</b></a></h4>
<h4 align="center">AssemblyHands dataset: <a href="https://assemblyhands.github.io/"><b>https://assemblyhands.github.io/</b></a></h4>
</div>
<h1 id="challenge2">ARCTIC</h1>
<p>
Humans interact with various objects daily, making holistic 3D capture of these interactions crucial for
modeling human behavior. Most methods for reconstructing hand-object interactions require pre-scanned 3D
object templates, which are impractical in real-world scenarios. Recently, HOLD (<a
href="https://zc-alexfan.github.io/hold">Fan et al. CVPR’24</a>) has shown promise in category-agnostic
hand-object reconstruction but is limited to single-hand interaction.
</p>
<p>
Since we naturally interact with both hands, we host the bimanual category-agnostic reconstruction task
where participants must reconstruct both hands and the object in 3D from a video clip, without relying on
pre-scanned templates. This task is more challenging as bimanual manipulation exhibits severe hand-object
occlusion and dynamic hand-object contact, leaving room for future development.
</p>
<p align="center" style="margin: 2; padding: 2;">
<img src="assets/arctic/sushi.gif" alt="Image" width="100%" style="display: block; margin: 0 auto;" />
</p>
<p>
To benchmark this challenge, we adapt HOLD to the two-hand manipulation setting and use 9 videos from the <a
href="https://github.com/zc-alexfan/arctic">ARCTIC dataset</a>'s rigid-object collection, one per object
(excluding small objects such as scissors and phone), sourced from its test set.
You will be provided with HOLD baseline skeleton code for the ARCTIC setting, as well as code to produce
submission data for our evaluation server.
</p>
<h3>Important Notes</h3>
<ul>
<li>Submission deadline for results: September 15, 2024 (11:59PM PST) </li>
<li>Results will be shared during the HANDS workshop at ECCV 2024 on September 30, 2024</li>
</ul>
<h3>Rules</h3>
<ul>
<li>Participants cannot use groundtruth intrinsics, extrinsics, hand/object annotations, or object
templates from ARCTIC.</li>
<li>Only use the provided pre-cropped ARCTIC images for the competition.</li>
<li>The test set groundtruth is hidden; submit predictions to our evaluation server for assessment
(details coming soon).</li>
<li>Different hand trackers or methods to estimate object pose can be used if not trained on ARCTIC data.
</li>
<li>Participants may need to submit code for rule violation checks.</li>
<li>The code must be reproducible by the organizers.</li>
<li>Reproduced results should match the reported results.</li>
<li>Participants may be disqualified if results cannot be reproduced by the organizers.</li>
<li>Methods must show non-trivial novelty; minor changes like hyperparameter tuning do not count.</li>
<li>Methods must outperform the HOLD baseline by at least 5% to be considered valid, avoiding small margin
improvements due to numerical errors.</li>
<li>We reserve the right to determine if a method is valid and eligible for awards.</li>
</ul>
<p><b>Metric: </b>We use hand-relative chamfer distance, CD_h (the lower the better), as the main metric for
this competition; it is defined in the <a href="https://arxiv.org/abs/2311.18448">HOLD paper</a>. For this
two-hand setting, we average the left- and right-hand CD_h metrics.</p>
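<p>As a reference-only sketch of the metric (the authoritative definition is in the HOLD paper and evaluation code, which may, e.g., average rather than sum the two chamfer directions), a symmetric chamfer distance between object point clouds already expressed in a hand-relative frame, averaged over the two hands, could look like:</p>
<pre style="background-color: #f5f5f5; border: 1px solid #ccc; padding: 10px; text-align: left; overflow-x: auto;"><code>import numpy as np

def chamfer_distance(pred_pts, gt_pts):
    # pred_pts: (M, 3), gt_pts: (N, 3); pairwise distances, then nearest neighbours both ways.
    d = np.linalg.norm(pred_pts[:, None, :] - gt_pts[None, :, :], axis=-1)
    return d.min(axis=1).mean() + d.min(axis=0).mean()

def cd_h_two_hands(pred_obj_left, gt_obj_left, pred_obj_right, gt_obj_right):
    # Each argument is an object point cloud already transformed into the corresponding
    # hand's coordinate frame; the final score averages the left and right CD_h values.
    return 0.5 * (chamfer_distance(pred_obj_left, gt_obj_left) +
                  chamfer_distance(pred_obj_right, gt_obj_right))</code></pre>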
<p><b>Support: </b>For general tips on processing and improving HOLD, see <a
href="https://github.com/zc-alexfan/hold/issues/6">here</a>. For other technical questions, raise an
issue on the repository. Should you have any questions regarding the ARCTIC challenge (e.g., about the rules above),
feel free to contact [email protected].</p>
<div style="background-color: #e9e9e9; border: 1px solid black; text-align: center; word-wrap: break-word;">
<h4 align="center">Check out the following links to get started</h4>
<h4 align="center">HOLD project page: <a
href="https://zc-alexfan.github.io/hold"><b>https://zc-alexfan.github.io/hold</b></a></h4>
<h4 align="center">HOLD code: <a
href="https://github.com/zc-alexfan/hold"><b>https://github.com/zc-alexfan/hold</b></a></h4>
<h4 align="center">Challenge instructions: <a
href="https://github.com/zc-alexfan/hold/blob/master/docs/arctic.md"><b>https://github.com/zc-alexfan/hold/blob/master/docs/arctic.md</b></a>
</h4>
<h4 align="center">Leaderboard: <a
href="https://arctic-leaderboard.is.tuebingen.mpg.de/leaderboard"><b>https://arctic-leaderboard.is.tuebingen.mpg.de/leaderboard</b></a>
</h4>
<h4 align="center">ARCTIC dataset: <a
href="https://arctic.is.tue.mpg.de/"><b>https://arctic.is.tue.mpg.de/</b></a></h4>
</div>
<h1 id="challenge3">OakInk2 </h1>
<p>This challenge focuses on the synthesis of physically plausible hand-object
interactions that leverage object functionalities. The goal of the challenge is to use demonstration
trajectories from existing datasets to generate hand-object interaction trajectories that are sufficient
for completing the specified functions in a designated physical simulation environment. Participants
need to synthesize trajectories that can be rolled out in the simulation environment and that
exploit the object's functions to achieve the task goals from the given initial state.
We use the <a href="https://oakink.net/v2">OakInk2 dataset (Zhan et al. CVPR'24)</a> as the source of
demonstration trajectories in this challenge. OakInk2 contains hand-object interaction demonstrations
centered around fulfilling object affordances. For this challenge, we provide demonstrations transferred
from the human hand (MANO) to a dexterous embodiment (ShadowHand) for the IsaacGym environment.
<br /><br />
This challenge provides the following resources:
<ul>
<li>A toolkit for setting up the simulation environment;</li>
<li>Relevant object assets and task initial conditions;</li>
<li>Raw demo trajectories transferred from the corresponding primitive tasks in OakInk2.</li>
</ul>
Participants need to submit the following files (via one of two methods):
<ul>
<li><b>(Method 1)</b> Code snippets and corresponding instructions for rolling out trajectories in the
simulation environment (inference only, no training required), plus model weights if the submitted code
requires them to run.</li>
<li><b>(Method 2)</b> Full simulator states of all synthesized trajectories.</li>
</ul>
The submission can take one of three forms:
<ul>
<li>Upload a link pointing to a public code repository containing the executable code and weights;</li>
<li>Upload an archive containing the executable code and weights;</li>
<li>Upload an archive containing the full simulator states of all synthesized trajectories.</li>
</ul>
</p>
<div style="background-color: #e9e9e9; border: 1px solid black; text-align: center; word-wrap: break-word;">
<h4 align="center">Check out the following links to get started</h4>
<h4 align="center">OakInk2 project page: <a href="https://oakink.net/v2"><b>https://oakink.net/v2</b></a>
</h4>
<h4 align="center"> Toolkit for loading and visualization <a
href="https://github.com/oakink/OakInk2-PreviewTool"><b>https://github.com/oakink/OakInk2-PreviewTool</b></a>
</h4>
<h4 align="center">Challenge instructions: <a
href="https://github.com/kelvin34501/OakInk2-SimEnv-IsaacGym/wiki"><b>https://github.com/kelvin34501/OakInk2-SimEnv-IsaacGym/wiki</b></a>
</h4>
<h4 align="center">Simulation environment startup <a
href="https://github.com/kelvin34501/OakInk2-SimEnv-IsaacGym/"><b>https://github.com/kelvin34501/OakInk2-SimEnv-IsaacGym/</b></a>
</h4>
</div>
<h1 id="challenge4">Multiview Egocentric Hand Tracking Challenge (MegoTrack) </h1>
<p>In XR applications, accurately estimating hand poses from egocentric cameras is important for enabling
social presence and interaction with the environment. Hand pose estimation on XR headsets
presents unique challenges that differentiate it from existing problem statements. For the pose track,
participants are provided with calibrated stereo hand-crop videos and MANO shape parameters, and the
expected results are MANO pose parameters and wrist transformations. The submitted pose parameters are
combined with the provided MANO shape parameters to compute keypoints and MPJPE, in contrast to the
existing protocol that allows methods to directly produce tracking results in the form of 3D keypoints
or full MANO parameters, which does not penalize undesirable changes in hand shape. For the shape track,
participants are provided with calibrated stereo hand-crop videos, each assumed to capture a single
subject, and the expected result is the MANO shape parameters, which will be evaluated using vertex
errors in the neutral pose. </p>
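<p>To make the two evaluation protocols concrete, here is a rough sketch using the smplx MANO layer (an illustrative assumption; the official evaluation lives in the Hand Tracking Toolkit, and its joint set, keypoint definition, and units may differ):</p>
<pre style="background-color: #f5f5f5; border: 1px solid #ccc; padding: 10px; text-align: left; overflow-x: auto;"><code>import torch
from smplx import MANO  # illustrative choice of MANO implementation, not the official toolkit

# Hypothetical model path; the official toolkit ships its own MANO handling.
mano = MANO(model_path="path/to/mano", is_rhand=True, use_pca=False, flat_hand_mean=True)

def pose_track_mpjpe(pred_hand_pose, pred_wrist_rot, pred_wrist_transl,
                     provided_shape, gt_keypoints):
    # Keypoints are computed from the *provided* shape plus the submitted pose and wrist
    # transform, so errors in shape cannot compensate for errors in pose.
    out = mano(betas=provided_shape, hand_pose=pred_hand_pose,
               global_orient=pred_wrist_rot, transl=pred_wrist_transl)
    # out.joints uses smplx's default joint set; the challenge defines its own keypoints.
    return torch.norm(out.joints - gt_keypoints, dim=-1).mean()

def shape_track_vertex_error(pred_shape, gt_shape):
    # Shape is scored by vertex-to-vertex error of the neutral (zero) pose meshes.
    batch = pred_shape.shape[0]
    zero_pose = torch.zeros(batch, 45)
    zero_rot = torch.zeros(batch, 3)
    v_pred = mano(betas=pred_shape, hand_pose=zero_pose, global_orient=zero_rot).vertices
    v_gt = mano(betas=gt_shape, hand_pose=zero_pose, global_orient=zero_rot).vertices
    return torch.norm(v_pred - v_gt, dim=-1).mean()</code></pre>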
<p>This year, we are excited to launch two distinct tracks:</p>
<ul>
<li>Hand Shape Estimation: Determine the hand shape using calibrated stereo videos.</li>
<li>Hand Pose Estimation: Track hand poses in calibrated stereo videos, utilizing pre-calibrated hand
shapes.</li>
</ul>
<p>To encourage the development of generalizable methods, we adopt two datasets featuring different headset
configurations: UmeTrack and HOT3D, the latter of which was recently introduced at CVPR.</p>
<div style="background-color: #e9e9e9; border: 1px solid black; text-align: center; word-wrap: break-word;" >
<h4 align="center">Check out the following links to get started</h4>
<h4 align="center">Challenge page: <a
href="https://eval.ai/web/challenges/challenge-page/2333/overview"><b>https://eval.ai/web/challenges/challenge-page/2333/overview</b></a>
</h4>
<h4 align="center">Hand Tracking Toolkit: <a
href="https://github.com/facebookresearch/hand_tracking_toolkit"><b>https://github.com/facebookresearch/hand_tracking_toolkit</b></a>
</h4>
<h4 align="center">UmeTrack dataset: <a
href="https://huggingface.co/datasets/facebook/hand_tracking_challenge_umetrack"><b>https://huggingface.co/datasets/facebook/hand_tracking_challenge_umetrack</b></a>
</h4>
</div>
<h1 id="contact">Contact</h1>
<p>[email protected]</p>
<div id="footer">
<p style="align-items: center;text-align: center;">
<a href="https://youtube.com/@handsworkshop" target="_Blank">
<img id="page1" alt="" src="profiles/youtube.jpg">
</a>
<a href="https://github.com/handsworkshop" target="_Blank">
<img id="page" alt="" src="profiles/github.png">
</a>
</p>
</div>
</div>
</div>
<script>
document.getElementById('outer_li_year').addEventListener('click', function (event) {
event.preventDefault(); // prevent the default link behavior
// get the year from the first <li> entry
var year = document.querySelector('#outer_list > li:first-child > a').textContent.trim();
// build the new href
var newHref = 'workshop' + year + '.html';
// navigate to the new page
window.location.href = newHref;
});
document.getElementById('workshop_link').addEventListener('click', function (event) {
event.preventDefault(); // prevent the default link behavior
// get the year from the first <li> entry
var year = document.querySelector('#outer_list > li:first-child > a').textContent.trim();
// build the new href
var newHref = 'workshop' + year + '.html';
// navigate to the new page
window.location.href = newHref;
});
document.getElementById('challenge_link').addEventListener('click', function (event) {
event.preventDefault(); // prevent the default link behavior
// get the year from the first <li> entry
var year = document.querySelector('#outer_list > li:first-child > a').textContent.trim();
// build the new href
var newHref = 'challenge' + year + '.html';
// navigate to the new page
window.location.href = newHref;
});
// get all <a> tags with id="style3"
var yearLinks = document.querySelectorAll('#style3');
yearLinks.forEach(function (link) {
link.addEventListener('click', function (event) {
event.preventDefault(); // prevent the default link behavior
// get the clicked year
var selectedYear = this.textContent.trim();
// update the year shown in the first list item
document.getElementById('current_year').textContent = selectedYear;
// close the dropdown menu
// document.getElementById('top_list').style.display = 'none';
// optional: additional navigation logic could go here if needed
window.location.href = this.href;
});
});
var workshopLi1 = document.querySelector('#challenge_link');
workshopLi1.classList.add('highlight');
</script>
</body>
</html>