diff --git a/challenge_config.yaml b/challenge_config.yaml
index ede451aed..e8327e44e 100755
--- a/challenge_config.yaml
+++ b/challenge_config.yaml
@@ -1,7 +1,7 @@
 # If you are not sure what all these fields mean, please refer our documentation here:
 # https://evalai.readthedocs.io/en/latest/configuration.html
-title: Random Number Generator Challenge
-short_description: Random number generation challenge for each submission
+title: Testing Out EvalAI
+short_description: Just Testing Out EvalAI
 description: templates/description.html
 evaluation_details: templates/evaluation_details.html
 terms_and_conditions: templates/terms_and_conditions.html
@@ -22,11 +22,11 @@ leaderboard:
         "labels": ["Metric1", "Metric2", "Metric3", "Total"],
         "default_order_by": "Total",
         "metadata": {
-          "Metric1": {
+          "Speed": {
             "sort_ascending": True,
             "description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
           },
-          "Metric2": {
+          "Time Completion": {
             "sort_ascending": True,
             "description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
           }
@@ -58,7 +58,7 @@ challenge_phases:
         is_visible: True
     submission_meta_attributes:
       - name: TextAttribute
-        description: Sample
+        description: TEST BY LEO
         type: text
         required: False
      - name: SingleOptionAttribute
@@ -85,7 +85,7 @@ challenge_phases:
     start_date: 2019-01-01 00:00:00
     end_date: 2099-05-24 23:59:59
     test_annotation_file: annotations/test_annotations_testsplit.json
-    codename: test
+    codename: registration
     max_submissions_per_day: 5
     max_submissions_per_month: 50
     max_submissions: 50
@@ -114,7 +114,7 @@ challenge_phases:
         description: Sample
         type: boolean
     is_restricted_to_select_one_submission: False
-    is_partial_submission_evaluation_enabled: False
+    is_partial_submission_evaluation_enabled: True
 
 dataset_splits:
   - id: 1
diff --git a/evaluation_script/main.py b/evaluation_script/main.py
index 61c73d9b5..32196ec8a 100644
--- a/evaluation_script/main.py
+++ b/evaluation_script/main.py
@@ -78,4 +78,6 @@ def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwarg
         # To display the results in the result file
         output["submission_result"] = output["result"][0]
         print("Completed evaluation for Test Phase")
+    elif phase_codename == "registration":
+        pass
     return output
diff --git a/github/host_config.json b/github/host_config.json
index e7ead6158..956b4f701 100644
--- a/github/host_config.json
+++ b/github/host_config.json
@@ -1,5 +1,5 @@
 {
-    "token": "",
-    "team_pk": "",
-    "evalai_host_url": ""
+    "token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0b2tlbl90eXBlIjoicmVmcmVzaCIsImV4cCI6MTcxMTQyNDM5OCwianRpIjoiZDc3YmNjYWU4NmQ3NDcyNzg3NTkxOTE3MzAyMGJmM2YiLCJ1c2VyX2lkIjo0MjB9.ppUrxx72fiI0DpEJ_3F6Icf1g0aQKa6pVngtFBGaEWQ",
+    "team_pk": "251",
+    "evalai_host_url": "https://staging.eval.ai"
 }
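
For reference, the sketch below shows how the new "registration" branch is intended to slot into the phase dispatch of evaluate() in evaluation_script/main.py. It is a minimal reconstruction assuming the structure of the EvalAI starter template: the "test" branch body and the random placeholder metrics are assumptions taken from that template, not part of this diff; only the final elif reflects the added code.

    import random


    def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwargs):
        # Dispatch on the phase codename; the "test" branch mirrors the starter
        # template's placeholder metrics (assumed), the "registration" branch is new.
        output = {}
        if phase_codename == "test":
            print("Evaluating for Test Phase")
            output["result"] = [
                {
                    "test_split": {
                        "Metric1": random.randint(0, 99),
                        "Metric2": random.randint(0, 99),
                        "Metric3": random.randint(0, 99),
                        "Total": random.randint(0, 99),
                    }
                }
            ]
            # To display the results in the result file
            output["submission_result"] = output["result"][0]
            print("Completed evaluation for Test Phase")
        elif phase_codename == "registration":
            # No metrics are computed for the registration phase; the submission
            # is accepted and an empty output dict is returned.
            pass
        return output

Returning an empty output for the registration phase means submissions to that phase are recorded but contribute nothing to the leaderboard.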