guxiaowu committed · verified · commit ca6759e · 1 parent: f40227d

Upload folder using huggingface_hub

.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
.gitattributes CHANGED
@@ -8,8 +8,6 @@
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mds filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -27,7 +25,6 @@
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
@@ -35,29 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
-# Video files - compressed
-*.mp4 filter=lfs diff=lfs merge=lfs -text
-*.webm filter=lfs diff=lfs merge=lfs -text
-DOUBAO_ENDPOINT="ep-20250117120525-pp8fp"
-DOUBAO_1_5_ENDPOINT="ep-20250122173512-4tqwl"
-DOUBAO_API_KEY="43e9209b-5c60-478e-8f2f-1b6077f5dc57"
-DOUBAO_1_5_256K_ENDPOINT="ep-20250123113810-mxjq2"
+scale-hf-logo.png filter=lfs diff=lfs merge=lfs -text

.gitignore ADDED
@@ -0,0 +1,13 @@
+auto_evals/
+venv/
+__pycache__/
+.env
+.ipynb_checkpoints
+*ipynb
+.vscode/
+
+eval-queue/
+eval-results/
+eval-queue-bk/
+eval-results-bk/
+logs/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,53 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+default_language_version:
+  python: python3
+
+ci:
+  autofix_prs: true
+  autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+  autoupdate_schedule: quarterly
+
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.3.0
+    hooks:
+      - id: check-yaml
+      - id: check-case-conflict
+      - id: detect-private-key
+      - id: check-added-large-files
+        args: ['--maxkb=1000']
+      - id: requirements-txt-fixer
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+
+  - repo: https://github.com/PyCQA/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+        name: Format imports
+
+  - repo: https://github.com/psf/black
+    rev: 22.12.0
+    hooks:
+      - id: black
+        name: Format code
+        additional_dependencies: ['click==8.0.2']
+
+  - repo: https://github.com/charliermarsh/ruff-pre-commit
+    # Ruff version.
+    rev: 'v0.0.267'
+    hooks:
+      - id: ruff
Makefile ADDED
@@ -0,0 +1,13 @@
+.PHONY: style quality
+
+
+style:
+	python -m black --line-length 119 .
+	python -m isort .
+	ruff check --fix .
+
+
+quality:
+	python -m black --check --line-length 119 .
+	python -m isort --check-only .
+	ruff check .
README.md CHANGED
@@ -1,42 +1,46 @@
 ---
-license: cc-by-4.0
+title: Web Bench Leaderboard
+emoji: 🥇
+colorFrom: green
+colorTo: indigo
+sdk: gradio
+app_file: app.py
+pinned: true
+license: apache-2.0
+short_description: Duplicate this leaderboard to initialize your own!
+sdk_version: 5.19.0
 ---
 
-# Web-Bench
-
-English | [中文 README](README.zh_CN.md)
-
-## 📖 Overview
-
-**Web-Bench** is a benchmark designed to evaluate the performance of LLMs in actual Web development. Web-Bench contains 50 projects, each consisting of 20 tasks with sequential dependencies. The tasks implement project features in sequence, simulating real-world human development workflows. When designing Web-Bench, we aim to cover the foundational elements of Web development: Web Standards and Web Frameworks. Given the scale and complexity of these projects, which were designed by engineers with 5-10 years of experience, each presents a significant challenge. On average, a single project takes 4–8 hours for a senior engineer to complete. On our given benchmark agent (Web-Agent), SOTA (Claude 3.7 Sonnet) achieves only 25.1\% Pass@1.
-
-The distribution of the experimental data aligns well with the current code generation capabilities of mainstream LLMs.
-<img width="500" alt="pass@1" src="./docs/assets/pass-1.png" />
-
-HumanEval and MBPP have approached saturation. APPS and EvalPlus are approaching saturation. The SOTA for Web-Bench is 25.1\%, which is lower (better) than that of the SWE-bench Full and Verified sets.
-<img width="500" alt="SOTAs" src="./docs/assets/sotas.png" />
-
-## Web-Bench: A LLM Code Benchmark Based on Web Standards and Frameworks
-The datasets was presented in the paper [Web-Bench: A LLM Code Benchmark Based on Web Standards and Frameworks](https://huggingface.co/papers/2505.07473).
-
-## 🏅 Leaderboard
-
-[Leaderboard](https://huggingface.co/spaces/bytedance-research/Web-Bench-Leaderboard)
-
-
-## Dataset Structure
-
-An example of a Web-Bench datum is as follows:
-
-```
-id: (str) Task id, init | task-n
-project: (str) Task project name
-description: (str) Task details description
-date: (str) Task publish date, filter contaminated model
-level: (str) Task level: easy | moderate | challenging
+# Start the configuration
+
+Most of the variables to change for a default leaderboard are in `src/envs.py` (replace the path for your leaderboard) and `src/about.py` (for tasks).
+
+Results files should have the following format and be stored as json files:
+```json
+{
+    "config": {
+        "model_dtype": "torch.float16", # or torch.bfloat16 or 8bit or 4bit
+        "model_name": "path of the model on the hub: org/model",
+        "model_sha": "revision on the hub",
+    },
+    "results": {
+        "task_name": {
+            "metric_name": score,
+        },
+        "task_name2": {
+            "metric_name": score,
+        }
+    }
+}
 ```
 
-## 📘 Usage
+Request files are created automatically by this tool.
+
+If you encounter a problem on the Space, don't hesitate to restart it to remove the eval-queue, eval-queue-bk, eval-results, and eval-results-bk folders it creates.
 
-[GitHub](https://github.com/bytedance/web-bench)
+# Code logic for more complex edits
+
+You'll find
+- the main table's column names and properties in `src/display/utils.py`
+- the logic to read all results and request files, then convert them into dataframe lines, in `src/leaderboard/read_evals.py` and `src/populate.py`
+- the logic to allow or filter submissions in `src/submission/submit.py` and `src/submission/check_validity.py`
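
As a sanity check on the results-file schema above, here is a minimal sketch of a loader for one such file. The path and helper name are hypothetical illustrations; the Space's real parsing lives in `src/leaderboard/read_evals.py` and `src/populate.py`.

```python
import json

def load_result_file(path: str) -> dict:
    """Flatten one results file into {"org/model": {"task.metric": score}}."""
    with open(path, encoding="utf-8") as fp:
        data = json.load(fp)
    model = data["config"]["model_name"]  # "org/model", per the schema above
    scores = {
        f"{task}.{metric}": value
        for task, metrics in data["results"].items()
        for metric, value in metrics.items()
    }
    return {model: scores}

# Hypothetical usage, assuming such a file exists locally:
# load_result_file("eval-results/org/model/results_2025-05-13.json")
```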
app.py ADDED
@@ -0,0 +1,177 @@
+import gradio as gr
+from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
+import pandas as pd
+from apscheduler.schedulers.background import BackgroundScheduler
+from huggingface_hub import snapshot_download
+
+from src.about import (
+    CITATION_BUTTON_LABEL,
+    CITATION_BUTTON_TEXT,
+    EVALUATION_QUEUE_TEXT,
+    INTRODUCTION_TEXT,
+    LLM_BENCHMARKS_TEXT,
+    TITLE,
+)
+from src.display.css_html_js import custom_css, block_css, js
+from src.display.utils import (
+    BENCHMARK_COLS,
+    COLS,
+    EVAL_COLS,
+    EVAL_TYPES,
+    AutoEvalColumn,
+    ModelType,
+    fields,
+    WeightType,
+)
+from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
+from src.populate import get_evaluation_queue_df, get_leaderboard_df
+from src.submission.submit import add_new_eval
+
+
+def restart_space():
+    API.restart_space(repo_id=REPO_ID)
+
+### Space initialisation
+try:
+    print(EVAL_REQUESTS_PATH)
+    snapshot_download(
+        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+    )
+except Exception:
+    restart_space()
+try:
+    print(EVAL_RESULTS_PATH)
+    snapshot_download(
+        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+    )
+except Exception:
+    restart_space()
+
+
+LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+
+(
+    finished_eval_queue_df,
+    running_eval_queue_df,
+    pending_eval_queue_df,
+) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+
+def init_leaderboard(dataframe):
+    if dataframe is None or dataframe.empty:
+        raise ValueError("Leaderboard DataFrame is empty or None.")
+    print(f"dataframe {dataframe}")
+    return Leaderboard(
+        value=dataframe,
+        datatype=[c.type for c in fields(AutoEvalColumn)],
+        select_columns=SelectColumns(
+            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
+            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
+            label="Select Columns to Display:",
+        ),
+        search_columns=[AutoEvalColumn.model.name],
+        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
+        filter_columns=[
+            ColumnFilter(AutoEvalColumn.provider.name, type="dropdown", label="Provider"),
+            # ColumnFilter(
+            #     AutoEvalColumn.pass2.name,
+            #     type="slider",
+            #     min=0.01,
+            #     max=100,
+            #     label="Select the number of Pass@2",
+            # ),
+            ColumnFilter(AutoEvalColumn.openness.name, type="checkboxgroup", label="Openness"),
+        ],
+        bool_checkboxgroup_label="Hide models",
+        interactive=False,
+    )
+
+
+demo = gr.Blocks(css=custom_css, js=js, elem_classes="custom-block")
+with demo:
+    block = gr.Blocks(css=block_css, elem_classes="custom-block")
+    with block:
+        gr.HTML(TITLE)
+        gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+        with gr.Tabs(elem_classes="tab-buttons") as tabs:
+            with gr.TabItem("🏅 Benchmark", elem_id="llm-benchmark-tab-table1", id=1):
+                leaderboard = init_leaderboard(LEADERBOARD_DF)
+            with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table2", id=2):
+                gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+
+            with gr.TabItem("🚀 Submit", elem_id="llm-benchmark-tab-table3", id=3):
+                gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+                # with gr.Column():
+                #     with gr.Row():
+
+
+                # with gr.Column():
+                #     with gr.Accordion(
+                #         f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+                #         open=False,
+                #     ):
+                #         with gr.Row():
+                #             finished_eval_table = gr.components.Dataframe(
+                #                 value=finished_eval_queue_df,
+                #                 headers=EVAL_COLS,
+                #                 datatype=EVAL_TYPES,
+                #                 row_count=5,
+                #             )
+                #     with gr.Accordion(
+                #         f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+                #         open=False,
+                #     ):
+                #         with gr.Row():
+                #             running_eval_table = gr.components.Dataframe(
+                #                 value=running_eval_queue_df,
+                #                 headers=EVAL_COLS,
+                #                 datatype=EVAL_TYPES,
+                #                 row_count=5,
+                #             )
+
+                #     with gr.Accordion(
+                #         f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+                #         open=False,
+                #     ):
+                #         with gr.Row():
+                #             pending_eval_table = gr.components.Dataframe(
+                #                 value=pending_eval_queue_df,
+                #                 headers=EVAL_COLS,
+                #                 datatype=EVAL_TYPES,
+                #                 row_count=5,
+                #             )
+                # with gr.Row():
+                #     gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+                # with gr.Row():
+                #     with gr.Column():
+                #         model_name_textbox = gr.Textbox(label="Model name")
+                #         revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+                #     with gr.Column():
+                #         base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+                #     submit_button = gr.Button("Submit Eval")
+                #     submission_result = gr.Markdown()
+                #     submit_button.click(
+                #         add_new_eval,
+                #         [
+                #             model_name_textbox,
+                #             base_model_name_textbox,
+                #             revision_name_textbox,
+                #         ],
+                #         submission_result,
+                #     )
+
+    # with gr.Row():
+    #     with gr.Accordion("📙 Citation", open=False):
+    #         citation_button = gr.Textbox(
+    #             value=CITATION_BUTTON_TEXT,
+    #             label=CITATION_BUTTON_LABEL,
+    #             lines=20,
+    #             elem_id="citation-button",
+    #             show_copy_button=True,
+    #         )
+
+scheduler = BackgroundScheduler()
+scheduler.add_job(restart_space, "interval", seconds=1800)
+scheduler.start()
+demo.queue(default_concurrency_limit=40).launch()
pyproject.toml ADDED
@@ -0,0 +1,13 @@
+[tool.ruff]
+# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
+select = ["E", "F"]
+ignore = ["E501"] # line too long (black is taking care of this)
+line-length = 119
+fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
+
+[tool.isort]
+profile = "black"
+line_length = 119
+
+[tool.black]
+line-length = 119
requirements.txt ADDED
@@ -0,0 +1,16 @@
+APScheduler
+black
+datasets
+gradio
+gradio[oauth]
+gradio_leaderboard==0.0.13
+gradio_client
+huggingface-hub>=0.18.0
+matplotlib
+numpy
+pandas
+python-dateutil
+tqdm
+transformers
+tokenizers>=0.15.0
+sentencepiece
src/.DS_Store ADDED
Binary file (6.15 kB)
 
src/about.py ADDED
@@ -0,0 +1,75 @@
+from dataclasses import dataclass
+from enum import Enum
+
+@dataclass
+class Task:
+    benchmark: str
+    metric: str
+    col_name: str
+
+
+# Select your tasks here
+# ---------------------------------------------------
+class Tasks(Enum):
+    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
+    task0 = Task("anli_r1", "acc", "ANLI")
+    task1 = Task("logiqa", "acc_norm", "LogiQA")
+
+NUM_FEWSHOT = 0  # Change with your few shot
+# ---------------------------------------------------
+
+
+
+# Your leaderboard name
+TITLE = """<h1 align="center" id="space-title">Web-Bench Leaderboard</h1>"""
+
+CONTAINER = """
+<div id="main-container" style="display: flex; justify-content: center; align-items: center;">
+</div>
+"""
+
+# What does your leaderboard evaluate?
+INTRODUCTION_TEXT = """
+
+**Web-Bench** is a benchmark designed to evaluate the performance of LLMs in actual Web development. Web-Bench contains 50 projects, each consisting of 20 tasks with sequential dependencies. The tasks implement project features in sequence, simulating real-world human development workflows. When designing Web-Bench, we aim to cover the foundational elements of Web development: Web Standards and Web Frameworks. Given the scale and complexity of these projects, which were designed by engineers with 5-10 years of experience, each presents a significant challenge. On average, a single project takes 4–8 hours for a senior engineer to complete. On our given benchmark agent (Web-Agent), SOTA (Claude 3.7 Sonnet) achieves only 25.1\% Pass@1.
+"""
+
+# Which evaluations are you running? how can people reproduce what you have?
+LLM_BENCHMARKS_TEXT = f"""
+## More Information
+
+More information can be found in the [Paper](https://arxiv.org/abs/2505.07473) or on [GitHub](https://github.com/bytedance/web-bench).
+
+"""
+
+EVALUATION_QUEUE_TEXT = """
+We welcome community submissions of new model evaluation results. Those submissions will be listed as 'External', and authors must upload their generated outputs for peer review.
+
+## Evaluation
+
+Evaluation [Setup](https://github.com/bytedance/web-bench?tab=readme-ov-file#-set-up) and [Usage](https://github.com/bytedance/web-bench?tab=readme-ov-file#-usage). This will generate a markdown report summarizing the results.
+
+
+## Submission
+
+To submit your results, create a **Pull Request** in the [Community Tab](https://huggingface.co/spaces/bytedance-research/Web-Bench-Leaderboard/discussions) to add them to the [src/custom-eval-results](https://huggingface.co/spaces/bytedance-research/Web-Bench-Leaderboard/tree/main/src/custom-eval-results) folder in this repository:
+- Create a new folder named with your provider and model names (e.g., [provider_modelname_template](https://huggingface.co/spaces/bytedance-research/Web-Bench-Leaderboard/tree/main/src/custom-eval-results/provider_modelname_temple), using underscores to separate parts).
+- Each folder stores the evaluation results of only one model.
+- Add a `base_meta.json` file with the following fields:
+    - `Model`: the name of your model
+    - `ModelLink`: the link to the model page
+    - `Provider`: the name of the provider
+    - `Openness`: the openness of the model
+    - `Agent`: the agent used for evaluation, `Web-Agent` or your custom agent name
+- Put your generated reports (e.g. `eval-20250513-102235`) in your folder.
+- The title of the PR should be: [Community Submission] Model: org/model, Username: your_username.
+
+We will review your submission and merge it upon acceptance.
+
+**Tips**: `gen_meta.json` will be created after our review.
+
+"""
+
+CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+CITATION_BUTTON_TEXT = r"""
+"""
src/custom-eval-results/provider_modelname_temple/base_meta.json ADDED
@@ -0,0 +1,7 @@
+{
+    "Provider": "Temple",
+    "Openness": "Closed",
+    "Thinking": false,
+    "Agent": "Web-Agent",
+    "ModelLink": "Your ModelLink"
+}
src/custom-eval-results/provider_modelname_temple/gen_meta.json ADDED
@@ -0,0 +1,9 @@
+{
+    "Pass@2": 23.799999999999997,
+    "Pass@1": 17.169999999999998,
+    "Error@1": 8.15,
+    "Version": "v1.0.0",
+    "Source": "🔶 External",
+    "Model": "test",
+    "PRLink": ""
+}
src/data.py ADDED
@@ -0,0 +1,84 @@
+import json
+from pathlib import Path
+import os
+
+def get_first_level_dirs(dir_a):
+    # Check that the target directory exists
+    if not os.path.isdir(dir_a):
+        raise ValueError(f"Directory does not exist: {dir_a}")
+
+    # Walk the entries under dir_a and keep only first-level folders
+    first_level_dirs = []
+    for entry in os.listdir(dir_a):
+        entry_path = os.path.join(dir_a, entry)
+        # Keep only entries that are directories and not symbolic links
+        if os.path.isdir(entry_path) and not os.path.islink(entry_path):
+            first_level_dirs.append(entry)  # store the folder name (use entry_path for the full path)
+
+    return first_level_dirs
+
+def read_json_files(filename=None):
+    """
+    Read JSON files from the current directory.
+
+    Args:
+        filename (str, optional): a specific file to read (e.g. "data.json");
+            by default all JSON files are read.
+
+    Returns:
+        If filename is given: the parsed content of that file (dict/list).
+        If not: a dict of {file stem: content} (e.g. {"data": {...}}).
+    """
+    current_dir = Path.cwd()
+    result = {}
+
+    try:
+        # Read a single file
+        if filename:
+            file_path = current_dir / 'src' / filename
+            print("JSON data loaded successfully:", current_dir, file_path)
+
+            with open(file_path, "r", encoding="utf-8") as f:
+                return json.load(f)
+
+        # Otherwise read every JSON file in the current directory
+        for json_file in current_dir.glob("*.json"):
+            with open(json_file, "r", encoding="utf-8") as f:
+                result[json_file.stem] = json.load(f)
+        return result
+
+    except FileNotFoundError:
+        print(f"Error: file {filename} does not exist")
+    except json.JSONDecodeError:
+        print(f"Error: file {filename} is not valid JSON")
+    except Exception as e:
+        print(f"Unknown error: {str(e)}")
+    return None
+
+
+
+def get_custom_data():
+    dir_list = Path.cwd() / 'src' / 'custom-eval-results'
+    res = []
+    for dir in dir_list.iterdir():
+        print('dirname', dir.name != 'provider_modelname_temple', dir.name)
+        if dir.is_dir() and dir.name != 'provider_modelname_temple':
+            # base_meta
+            print("dirname", dir.name)
+            with open(dir / 'base_meta.json', "r", encoding="utf-8") as f:
+                base_meta = json.load(f)
+            with open(dir / 'gen_meta.json', "r", encoding="utf-8") as f:
+                gen_meta = json.load(f)
+
+            base_meta.update(gen_meta)
+            print(base_meta)
+            base_meta['Model'] = f"<a target=\"_blank\" href=\"{base_meta['ModelLink']}\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">{base_meta['Model']}</a>"
+            base_meta['Type'] = '🔶 External'
+            base_meta['Source'] = f"<a target=\"_blank\" href=\"{base_meta['PRLink']}\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">Pull Request</a>" if base_meta['PRLink'] else ''
+            res.append(base_meta)
+    return res
+
+
+# # Example usage
+# if __name__ == "__main__":
+
+#     print(get_custom_data())
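
For context, a sketch of the folder layout `get_custom_data` walks and the merge it performs per folder. The folder name and values below are illustrative only; the repo ships `provider_modelname_temple` as the template:

```python
# src/custom-eval-results/
#   acme_supercoder/     <- one folder per submitted model (hypothetical name)
#     base_meta.json     <- Provider, Openness, Thinking, Agent, ModelLink
#     gen_meta.json      <- Pass@2, Pass@1, Error@1, Version, Source, Model, PRLink

# Per folder, the two dicts are merged (gen_meta wins on key collisions)
# and the row is tagged as an external submission:
base_meta = {"Provider": "Acme", "Model": "supercoder", "ModelLink": "https://example.com"}
gen_meta = {"Pass@2": 12.3, "Pass@1": 8.9, "PRLink": ""}
base_meta.update(gen_meta)
base_meta["Type"] = "🔶 External"
print(base_meta)
```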
src/display/css_html_js.py ADDED
@@ -0,0 +1,131 @@
+custom_css = """
+
+.markdown-text {
+    font-size: 16px !important;
+}
+
+.hide-container:not([id^="component-"]) {
+    display: none;
+}
+
+#component-0 {
+    max-width: 1546px;
+    margin: 0 auto;
+}
+
+#models-to-add-text {
+    font-size: 18px !important;
+}
+
+#citation-button span {
+    font-size: 16px !important;
+}
+
+#citation-button textarea {
+    font-size: 16px !important;
+}
+
+#citation-button > label > button {
+    margin: 6px;
+    transform: scale(1.3);
+}
+
+#leaderboard-table {
+    margin-top: 15px
+}
+
+#leaderboard-table-lite {
+    margin-top: 15px
+}
+
+#search-bar-table-box > div:first-child {
+    background: none;
+    border: none;
+}
+
+#search-bar {
+    padding: 0px;
+}
+
+/* Limit the width of the first AutoEvalColumn so that names don't expand too much */
+#leaderboard-table td:nth-child(2),
+#leaderboard-table th:nth-child(2) {
+    max-width: 400px;
+    overflow: auto;
+    white-space: nowrap;
+}
+
+.tab-buttons button {
+    font-size: 20px;
+}
+
+#scale-logo {
+    border-style: none !important;
+    box-shadow: none;
+    display: block;
+    margin-left: auto;
+    margin-right: auto;
+    max-width: 600px;
+}
+
+#scale-logo .download {
+    display: none;
+}
+#filter_type{
+    border: 0;
+    padding-left: 0;
+    padding-top: 0;
+}
+#filter_type label {
+    display: flex;
+}
+#filter_type label > span{
+    margin-top: var(--spacing-lg);
+    margin-right: 0.5em;
+}
+#filter_type label > .wrap{
+    width: 103px;
+}
+#filter_type label > .wrap .wrap-inner{
+    padding: 2px;
+}
+#filter_type label > .wrap .wrap-inner input{
+    width: 1px
+}
+#filter-columns-type{
+    border:0;
+    padding:0.5;
+}
+#filter-columns-size{
+    border:0;
+    padding:0.5;
+}
+#box-filter > .form{
+    border: 0
+}
+"""
+
+block_css = """
+.custom_block {
+    max-width: 1536px;
+
+}
+
+
+"""
+
+get_window_url_params = """
+    function(url_params) {
+        const params = new URLSearchParams(window.location.search);
+        url_params = Object.fromEntries(params);
+        return url_params;
+    }
+    """
+
+js="""
+function() {
+    const comp = document.querySelector("body > div:nth-child(1) > div.gradio-container.gradio-container-5-19-0.svelte-1reuit1")
+    console.log("comp", comp)
+    comp.style.maxHeight = '1215px'
+}
+"""
src/display/formatting.py ADDED
@@ -0,0 +1,27 @@
+def model_hyperlink(link, model_name):
+    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+
+def make_clickable_model(model_name):
+    link = f"https://huggingface.co/{model_name}"
+    return model_hyperlink(link, model_name)
+
+
+def styled_error(error):
+    return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+
+def styled_warning(warn):
+    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+
+
+def styled_message(message):
+    return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
+
+
+def has_no_nan_values(df, columns):
+    return df[columns].notna().all(axis=1)
+
+
+def has_nan_values(df, columns):
+    return df[columns].isna().any(axis=1)
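
A quick illustration of how these helpers render table cells. `make_clickable_model` assumes a Hub-style `org/model` id, while the hand-written entries in `src/results.json` effectively apply `model_hyperlink` with arbitrary URLs (the ids below are placeholders):

```python
from src.display.formatting import make_clickable_model, model_hyperlink

print(make_clickable_model("org/model"))
# -> <a target="_blank" href="https://huggingface.co/org/model" ...>org/model</a>

print(model_hyperlink("https://platform.openai.com/docs/models/gpt-4o", "gpt-4o"))
# -> <a target="_blank" href="https://platform.openai.com/docs/models/gpt-4o" ...>gpt-4o</a>
```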
src/display/utils.py ADDED
@@ -0,0 +1,90 @@
+from dataclasses import dataclass, make_dataclass
+from enum import Enum
+
+import pandas as pd
+
+def fields(raw_class):
+    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+
+
+# These classes are for user facing column names,
+# to avoid having to change them all around the code
+# when a modif is needed
+@dataclass
+class ColumnContent:
+    name: str
+    type: str
+    displayed_by_default: bool
+    hidden: bool = False
+    never_hidden: bool = False
+
+## Leaderboard columns
+auto_eval_column_dict = []
+# Init
+auto_eval_column_dict.append(["source_value", ColumnContent, ColumnContent("Type", "str", True, never_hidden=True)])
+auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
+
+# Model information
+auto_eval_column_dict.append(["pass2", ColumnContent, ColumnContent("Pass@2", "number", True)])
+auto_eval_column_dict.append(["pass1", ColumnContent, ColumnContent("Pass@1", "number", True)])
+auto_eval_column_dict.append(["error1", ColumnContent, ColumnContent("Error@1", "number", True)])
+auto_eval_column_dict.append(["source", ColumnContent, ColumnContent("Source", "markdown", True, False)])
+auto_eval_column_dict.append(["provider", ColumnContent, ColumnContent("Provider", "number", False)])
+auto_eval_column_dict.append(["version", ColumnContent, ColumnContent("Version", "str", False, True)])
+auto_eval_column_dict.append(["agent", ColumnContent, ColumnContent("Agent", "str", False)])
+auto_eval_column_dict.append(["openness", ColumnContent, ColumnContent("Openness", "str", False)])
+auto_eval_column_dict.append(["thinking", ColumnContent, ColumnContent("Thinking", "boolean", True)])
+
+# We use make dataclass to dynamically fill the scores from Tasks
+AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
+
+## For the queue columns in the submission tab
+@dataclass(frozen=True)
+class EvalQueueColumn:  # Queue column
+    model = ColumnContent("model", "markdown", True)
+    private = ColumnContent("private", "bool", True)
+    status = ColumnContent("status", "str", True)
+
+## All the model information that we might need
+@dataclass
+class ModelDetails:
+    name: str
+    display_name: str = ""
+    symbol: str = ""  # emoji
+
+
+class ModelType(Enum):
+    PT = ModelDetails(name="pretrained", symbol="🟢")
+    FT = ModelDetails(name="fine-tuned", symbol="🔶")
+    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
+    RL = ModelDetails(name="RL-tuned", symbol="🟦")
+    Unknown = ModelDetails(name="", symbol="?")
+
+    def to_str(self, separator=" "):
+        return f"{self.value.symbol}{separator}{self.value.name}"
+
+    @staticmethod
+    def from_str(type):
+        if "fine-tuned" in type or "🔶" in type:
+            return ModelType.FT
+        if "pretrained" in type or "🟢" in type:
+            return ModelType.PT
+        if "RL-tuned" in type or "🟦" in type:
+            return ModelType.RL
+        if "instruction-tuned" in type or "⭕" in type:
+            return ModelType.IFT
+        return ModelType.Unknown
+
+class WeightType(Enum):
+    Adapter = ModelDetails("Adapter")
+    Original = ModelDetails("Original")
+    Delta = ModelDetails("Delta")
+
+# Column selection
+COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
+
+EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
+EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
+
+BENCHMARK_COLS = []
+
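Since `AutoEvalColumn` is assembled at import time with `make_dataclass`, each entry of `auto_eval_column_dict` becomes a class attribute whose default value is a `ColumnContent`; the custom `fields()` above recovers those records by filtering out dunder attributes. A small sketch of how the rest of the app consumes it:

```python
from src.display.utils import AutoEvalColumn, COLS, fields

# Named access, e.g. the search column wired up in app.py:
print(AutoEvalColumn.model.name)  # "Model"

# Iteration over all ColumnContent records, e.g. to build column selectors:
for col in fields(AutoEvalColumn):
    print(col.name, col.type, col.displayed_by_default, col.hidden)

print(COLS)  # every non-hidden column name, in declaration order
```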
src/envs.py ADDED
@@ -0,0 +1,25 @@
+import os
+
+from huggingface_hub import HfApi
+
+# Info to change for your repository
+# ----------------------------------
+TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org
+
+OWNER = "demo-leaderboard-backend"  # Change to your org - don't forget to create a results and request dataset, with the correct format!
+# ----------------------------------
+
+REPO_ID = f"{OWNER}/leaderboard"
+QUEUE_REPO = f"{OWNER}/requests"
+RESULTS_REPO = f"{OWNER}/results"
+
+# If you setup a cache later, just change HF_HOME
+CACHE_PATH = os.getenv("HF_HOME", ".")
+
+# Local caches
+EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
+EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
+EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
+EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
+
+API = HfApi(token=TOKEN)
src/leaderboard/read_evals.py ADDED
@@ -0,0 +1,160 @@
+import glob
+import json
+import math
+import os
+from dataclasses import dataclass
+
+import dateutil
+import numpy as np
+
+from src.display.formatting import make_clickable_model
+from src.display.utils import AutoEvalColumn, ModelType, WeightType
+from src.submission.check_validity import is_model_on_hub
+
+
+@dataclass
+class EvalResult:
+    """Represents one full evaluation. Built from a combination of the result and request file for a given run.
+    """
+    eval_name: str  # org_model_precision (uid)
+    full_model: str  # org/model (path on hub)
+    org: str
+    model: str
+    results: dict
+    num_params: int = 0
+    date: str = ""  # submission date of request file
+
+    @classmethod
+    def init_from_json_file(cls, json_filepath):
+        """Inits the result from the specific model result file"""
+        with open(json_filepath) as fp:
+            data = json.load(fp)
+
+        config = data.get("config")
+
+        # Get model and org
+        org_and_model = config.get("model_name", config.get("model_args", None))
+        org_and_model = org_and_model.split("/", 1)
+
+        if len(org_and_model) == 1:
+            org = None
+            model = org_and_model[0]
+        else:
+            org = org_and_model[0]
+            model = org_and_model[1]
+        result_key = f"{org}_{model}"
+        full_model = "/".join(org_and_model)
+
+        # Extract results available in this file (some results are split in several files)
+        results = {}
+
+        return cls(
+            eval_name=result_key,
+            full_model=full_model,
+            org=org,
+            model=model,
+            results=results,
+        )
+
+    def update_with_request_file(self, requests_path):
+        """Finds the relevant request file for the current model and updates info with it"""
+        request_file = get_request_file_for_model(requests_path, self.full_model)
+
+        try:
+            with open(request_file, "r") as f:
+                request = json.load(f)
+            self.pass2 = request.get("pass2", 0)
+            self.pass1 = request.get("pass1", 0)
+            self.error1 = request.get("error1", 0)
+            self.openness = request.get("openness", 'Closed')
+            self.provider = request.get("provider", 'Unknown')
+            self.source = request.get("source", 'Unknown')
+            self.source_value = request.get("source_value", 'Unknown')
+            self.agent = request.get("agent", 'Web-Agent')
+            self.version = request.get("version", 'v1.0.0')
+            self.thinking = request.get("thinking", True)
+            self.date = request.get("submitted_time", "")
+        except Exception:
+            print(f"Could not find request file for {self.org}/{self.model}")
+
+    def to_dict(self):
+        """Converts the Eval Result to a dict compatible with our dataframe display"""
+        data_dict = {
+            "eval_name": self.eval_name,  # not a column, just a save name,
+            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
+            AutoEvalColumn.pass2.name: self.pass2,
+            AutoEvalColumn.pass1.name: self.pass1,
+            AutoEvalColumn.openness.name: self.openness,
+            AutoEvalColumn.error1.name: self.error1,
+            AutoEvalColumn.provider.name: self.provider,
+            AutoEvalColumn.source.name: self.source,
+            AutoEvalColumn.source_value.name: self.source_value,
+            AutoEvalColumn.version.name: self.version,
+            AutoEvalColumn.agent.name: self.agent,
+            AutoEvalColumn.thinking.name: self.thinking,
+        }
+
+
+        return data_dict
+
+
+def get_request_file_for_model(requests_path, model_name):
+    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
+    request_files = os.path.join(
+        requests_path,
+        f"{model_name}_eval_request_*.json",
+    )
+    request_files = glob.glob(request_files)
+    print("")
+    request_file = ""
+    request_files = sorted(request_files, reverse=True)
+    for tmp_request_file in request_files:
+        with open(tmp_request_file, "r") as f:
+            req_content = json.load(f)
+            if (
+                req_content["status"] in ["FINISHED"]
+            ):
+                request_file = tmp_request_file
+    return request_file
+
+
+def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
+    """From the path of the results folder root, extract all needed info for results"""
+    model_result_filepaths = []
+
+    for root, _, files in os.walk(results_path):
+        # We should only have json files in model results
+        if len(files) == 0 or any([not f.endswith(".json") for f in files]):
+            continue
+
+        # Sort the files by date
+        try:
+            files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
+        except dateutil.parser._parser.ParserError:
+            files = [files[-1]]
+
+        for file in files:
+            model_result_filepaths.append(os.path.join(root, file))
+
+    eval_results = {}
+    for model_result_filepath in model_result_filepaths:
+        # Creation of result
+        eval_result = EvalResult.init_from_json_file(model_result_filepath)
+        eval_result.update_with_request_file(requests_path)
+
+        # Store results of same eval together
+        eval_name = eval_result.eval_name
+        if eval_name in eval_results.keys():
+            eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
+        else:
+            eval_results[eval_name] = eval_result
+
+    results = []
+    for v in eval_results.values():
+        try:
+            v.to_dict()  # we test if the dict version is complete
+            results.append(v)
+        except KeyError:  # not all eval values present
+            continue
+
+    return results
src/populate.py ADDED
@@ -0,0 +1,73 @@
+import json
+import os
+
+import pandas as pd
+from pathlib import Path
+
+from src.display.formatting import has_no_nan_values, make_clickable_model
+from src.display.utils import AutoEvalColumn, EvalQueueColumn
+from src.leaderboard.read_evals import get_raw_eval_results
+from src.data import read_json_files, get_custom_data
+from src.envs import CACHE_PATH
+
+
+def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
+    """Creates a dataframe from all the individual experiment results"""
+
+    json_data = read_json_files('results.json')
+    print('read_json_files', json_data)
+    current_dir = Path.cwd()
+
+    for item in json_data:
+        val = {'Type': '🟢 Base'}
+        item.update(val)
+
+    custom_data = get_custom_data()  # community submissions, if any
+    if len(custom_data) > 0:
+        json_data.extend(custom_data)
+
+    all_data_json = json_data
+
+    print('CACHE_PATH', CACHE_PATH, results_path, current_dir)
+
+    df = pd.DataFrame.from_records(all_data_json)
+    df = df.sort_values(by=[AutoEvalColumn.pass2.name], ascending=False)
+    df = df[cols].round(decimals=2)
+
+    # filter out if any of the benchmarks have not been produced
+    df = df[has_no_nan_values(df, benchmark_cols)]
+    return df
+
+
+def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
+    """Creates the different dataframes for the evaluation queue requests"""
+    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
+    all_evals = []
+
+    for entry in entries:
+        if ".json" in entry:
+            file_path = os.path.join(save_path, entry)
+            with open(file_path) as fp:
+                data = json.load(fp)
+
+            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+
+            all_evals.append(data)
+        elif ".md" not in entry:
+            # this is a folder
+            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(e) and not e.startswith(".")]
+            for sub_entry in sub_entries:
+                file_path = os.path.join(save_path, entry, sub_entry)
+                with open(file_path) as fp:
+                    data = json.load(fp)
+
+                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
+                all_evals.append(data)
+
+    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
+    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
+    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
+    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
+    df_running = pd.DataFrame.from_records(running_list, columns=cols)
+    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
+    return df_finished[cols], df_running[cols], df_pending[cols]
src/results.json ADDED
@@ -0,0 +1,535 @@
+[
+    {
+        "eval_name": "gpt-4o",
+        "Model": "<a target=\"_blank\" href=\"https://platform.openai.com/docs/models/gpt-4o\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gpt-4o</a>",
+        "Pass@2": 23.799999999999997,
+        "Pass@1": 17.169999999999998,
+        "Error@1": 8.15,
+        "Provider": "OpenAI",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gpt-4o-mini",
+        "Model": "<a target=\"_blank\" href=\"https://platform.openai.com/docs/models/gpt-4o-mini\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gpt-4o-mini</a>",
+        "Pass@2": 13.04,
+        "Pass@1": 8.48,
+        "Error@1": 7.28,
+        "Provider": "OpenAI",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gpt-4.1",
+        "Model": "<a target=\"_blank\" href=\"https://platform.openai.com/docs/models/gpt-4.1\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gpt-4.1</a>",
+        "Pass@2": 25.11,
+        "Pass@1": 21.09,
+        "Error@1": 6.52,
+        "Provider": "OpenAI",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "o3-mini",
+        "Model": "<a target=\"_blank\" href=\"https://platform.openai.com/docs/models/o3-mini\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">o3-mini</a>",
+        "Pass@2": 14.24,
+        "Pass@1": 9.13,
+        "Error@1": 7.93,
+        "Provider": "OpenAI",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "o1",
+        "Model": "<a target=\"_blank\" href=\"https://platform.openai.com/docs/models/o1\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">o1</a>",
+        "Pass@2": 12.389999999999999,
+        "Pass@1": 10.43,
+        "Error@1": 9.89,
+        "Provider": "OpenAI",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gpt-4.1-mini",
+        "Model": "<a target=\"_blank\" href=\"https://platform.openai.com/docs/models/gpt-4.1-mini\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gpt-4.1-mini</a>",
+        "Pass@2": 23.7,
+        "Pass@1": 20.76,
+        "Error@1": 6.8500000000000005,
+        "Provider": "OpenAI",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gpt-4.1-nano",
+        "Model": "<a target=\"_blank\" href=\"https://platform.openai.com/docs/models/gpt-4.1-nano\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gpt-4.1-nano</a>",
+        "Pass@2": 12.280000000000001,
+        "Pass@1": 7.07,
+        "Error@1": 6.959999999999999,
+        "Provider": "OpenAI",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "o4-mini",
+        "Model": "<a target=\"_blank\" href=\"https://platform.openai.com/docs/models/o4-mini\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">o4-mini</a>",
+        "Pass@2": 22.93,
+        "Pass@1": 13.26,
+        "Error@1": 9.89,
+        "Provider": "OpenAI",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "claude-3-7-sonnet-20250219",
+        "Model": "<a target=\"_blank\" href=\"https://www.anthropic.com/news/claude-3-7-sonnet\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">claude-3-7-sonnet-20250219</a>",
+        "Pass@2": 30.98,
+        "Pass@1": 22.5,
+        "Error@1": 8.260000000000002,
+        "Provider": "Anthropic",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "claude-3-5-sonnet-20241022",
+        "Model": "<a target=\"_blank\" href=\"https://www.anthropic.com/news/3-5-models-and-computer-use\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">claude-3-5-sonnet-20241022</a>",
+        "Pass@2": 32.39,
+        "Pass@1": 23.04,
+        "Error@1": 9.46,
+        "Provider": "Anthropic",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "claude-3-5-haiku-20241022",
+        "Model": "<a target=\"_blank\" href=\"https://www.anthropic.com/news/3-5-models-and-computer-use\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">claude-3-5-haiku-20241022</a>",
+        "Pass@2": 21.740000000000002,
+        "Pass@1": 15.43,
+        "Error@1": 7.61,
+        "Provider": "Anthropic",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "claude-3-5-sonnet-20240620",
+        "Model": "<a target=\"_blank\" href=\"https://www.anthropic.com/news/claude-3-5-sonnet\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">claude-3-5-sonnet-20240620</a>",
+        "Pass@2": 30.330000000000002,
+        "Pass@1": 21.959999999999997,
+        "Error@1": 8.799999999999999,
+        "Provider": "Anthropic",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "claude-3-7-sonnet-20250219-thinking",
+        "Model": "<a target=\"_blank\" href=\"https://www.anthropic.com/news/claude-3-7-sonnet\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">claude-3-7-sonnet-20250219-thinking</a>",
+        "Pass@2": 35.33,
+        "Pass@1": 25.11,
+        "Error@1": 9.02,
+        "Provider": "Anthropic",
+        "Openness": "Closed",
+        "Thinking": true,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "deepseek-r1",
+        "Model": "<a target=\"_blank\" href=\"https://api-docs.deepseek.com/news/news250120\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">deepseek-r1</a>",
+        "Pass@2": 26.200000000000003,
+        "Pass@1": 14.46,
+        "Error@1": 9.89,
+        "Provider": "DeepSeek",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "deepseek-coder-v2",
+        "Model": "<a target=\"_blank\" href=\"https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">deepseek-coder-v2</a>",
+        "Pass@2": 23.150000000000002,
+        "Pass@1": 16.74,
+        "Error@1": 7.829999999999999,
+        "Provider": "DeepSeek",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "deepseek-v3-0324",
+        "Model": "<a target=\"_blank\" href=\"https://api-docs.deepseek.com/news/news250325\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">deepseek-v3-0324</a>",
+        "Pass@2": 23.59,
+        "Pass@1": 17.07,
+        "Error@1": 7.93,
+        "Provider": "DeepSeek",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gemini-2.0-flash",
+        "Model": "<a target=\"_blank\" href=\"https://ai.google.dev/gemini-api/docs/models#gemini-2.0-flash\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gemini-2.0-flash</a>",
+        "Pass@2": 20.87,
+        "Pass@1": 15.329999999999998,
+        "Error@1": 7.07,
+        "Provider": "Google",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gemini-pro-1.5",
+        "Model": "<a target=\"_blank\" href=\"https://ai.google.dev/gemini-api/docs/models#gemini-1.5-pro\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gemini-pro-1.5</a>",
+        "Pass@2": 20.87,
+        "Pass@1": 14.78,
+        "Error@1": 7.28,
+        "Provider": "Google",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gemini-flash-1.5",
+        "Model": "<a target=\"_blank\" href=\"https://ai.google.dev/gemini-api/docs/models#gemini-1.5-flash\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gemini-flash-1.5</a>",
+        "Pass@2": 17.07,
+        "Pass@1": 12.83,
+        "Error@1": 7.07,
+        "Provider": "Google",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gemini-2.5-pro-03-25",
+        "Model": "<a target=\"_blank\" href=\"https://ai.google.dev/gemini-api/docs/models?hl=zh-cn#gemini-2.5-pro-preview-05-06\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gemini-2.5-pro-03-25</a>",
+        "Pass@2": 24.02,
+        "Pass@1": 15.67,
+        "Error@1": 8.04,
+        "Provider": "Google",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gemini-2.0-flash-thinking",
+        "Model": "<a target=\"_blank\" href=\"https://ai.google.dev/gemini-api/docs/models?hl=zh-cn#gemini-2.5-pro-preview-05-06\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gemini-2.0-flash-thinking</a>",
+        "Pass@2": 19.24,
+        "Pass@1": 14.89,
+        "Error@1": 6.959999999999999,
+        "Provider": "Google",
+        "Openness": "Closed",
+        "Thinking": true,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gemma-3-27b",
+        "Model": "<a target=\"_blank\" href=\"https://huggingface.co/google/gemma-3-27b-it\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gemma-3-27b</a>",
+        "Pass@2": 11.85,
+        "Pass@1": 9.89,
+        "Error@1": 6.2,
+        "Provider": "Google",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "gemini-2.5-pro-0506",
+        "Model": "<a target=\"_blank\" href=\"https://ai.google.dev/gemini-api/docs/models?hl=zh-cn#gemini-2.5-pro-preview-05-06\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">gemini-2.5-pro-0506</a>",
+        "Pass@2": 31.96,
+        "Pass@1": 20.76,
+        "Error@1": 8.7,
+        "Provider": "Google",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "llama-3.3",
+        "Model": "<a target=\"_blank\" href=\"https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_3/\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">llama-3.3</a>",
+        "Pass@2": 9.569999999999999,
+        "Pass@1": 6.63,
+        "Error@1": 6.2,
+        "Provider": "Meta",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "llama-4 Scout",
+        "Model": "<a target=\"_blank\" href=\"https://www.llama.com/docs/model-cards-and-prompt-formats/llama4_omni/\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">llama-4 Scout</a>",
+        "Pass@2": 7.720000000000001,
+        "Pass@1": 5,
+        "Error@1": 6.63,
+        "Provider": "Meta",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "llama-4 Maverick",
+        "Model": "<a target=\"_blank\" href=\"https://www.llama.com/docs/model-cards-and-prompt-formats/llama4_omni/\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">llama-4 Maverick</a>",
+        "Pass@2": 20.87,
+        "Pass@1": 15.98,
+        "Error@1": 7.07,
+        "Provider": "Meta",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "qwen-max-2025-01-25",
+        "Model": "<a target=\"_blank\" href=\"https://www.alibabacloud.com/help/en/model-studio/what-is-qwen-llm#c2d5833ae4jmo\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">qwen-max-2025-01-25</a>",
+        "Pass@2": 19.02,
+        "Pass@1": 15.870000000000001,
+        "Error@1": 6.3,
+        "Provider": "QWen",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "qwen-plus-2025-01-25",
+        "Model": "<a target=\"_blank\" href=\"https://www.alibabacloud.com/help/en/model-studio/what-is-qwen-llm#6ad3cd90f0c5r\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">qwen-plus-2025-01-25</a>",
+        "Pass@2": 15.110000000000001,
+        "Pass@1": 11.85,
+        "Error@1": 6.2,
+        "Provider": "QWen",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "qwen-2.5-72b-instruct",
+        "Model": "<a target=\"_blank\" href=\"https://huggingface.co/Qwen/Qwen2.5-72B-Instruct\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">qwen-2.5-72b-instruct</a>",
+        "Pass@2": 13.700000000000001,
+        "Pass@1": 10.54,
+        "Error@1": 6.63,
+        "Provider": "QWen",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "qwen-turbo-2024-11-01",
+        "Model": "<a target=\"_blank\" href=\"https://www.alibabacloud.com/help/en/model-studio/what-is-qwen-llm#ede6678dedqbz\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">qwen-turbo-2024-11-01</a>",
+        "Pass@2": 5.11,
+        "Pass@1": 2.6100000000000003,
+        "Error@1": 6.8500000000000005,
+        "Provider": "QWen",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "mistral-large-2411",
+        "Model": "<a target=\"_blank\" href=\"https://huggingface.co/mistralai/Pixtral-Large-Instruct-2411\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">mistral-large-2411</a>",
+        "Pass@2": 18.7,
+        "Pass@1": 13.04,
+        "Error@1": 7.28,
+        "Provider": "MistralAI",
+        "Openness": "Open",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "grok-2-1212",
+        "Model": "<a target=\"_blank\" href=\"https://openrouter.ai/x-ai/grok-2-vision-1212\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">grok-2-1212</a>",
+        "Pass@2": 17.169999999999998,
+        "Pass@1": 11.3,
+        "Error@1": 7.93,
+        "Provider": "X-AI",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
+        "Agent": "Web-Agent",
+        "Source": ""
+    },
+    {
+        "eval_name": "doubao-pro-1.5-32k",
+        "Model": "<a target=\"_blank\" href=\"https://www.volcengine.com/docs/82379/1554678\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">doubao-pro-1.5-32k</a>",
+        "Pass@2": 22.93,
+        "Pass@1": 16.63,
+        "Error@1": 7.28,
+        "Provider": "Doubao",
+        "Openness": "Closed",
+        "Thinking": false,
+        "Version": "v1.0.0",
428
+ "Agent": "Web-Agent",
429
+ "Source": ""
430
+ },
431
+ {
432
+ "eval_name": "doubao-pro-1.5-thinking",
433
+ "Model": "<a target=\"_blank\" href=\"http://volcengine.com/docs/82379/1536428\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">doubao-pro-1.5-thinking</a>",
434
+ "Pass@2": 30.220000000000002,
435
+ "Pass@1": 20.11,
436
+ "Error@1": 10.56,
437
+ "Provider": "Doubao",
438
+ "Openness": "Closed",
439
+ "Thinking": true,
440
+ "Version": "v1.0.0",
441
+ "Agent": "Web-Agent",
442
+ "Source": ""
443
+ },
444
+ {
445
+ "eval_name": "doubao-pro-1.5-32k-lite",
446
+ "Model": "<a target=\"_blank\" href=\"https://www.volcengine.com/docs/82379/1554516\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">doubao-pro-1.5-32k-lite</a>",
447
+ "Pass@2": 5.9799999999999995,
448
+ "Pass@1": 3.4799999999999995,
449
+ "Error@1": 6.41,
450
+ "Provider": "Doubao",
451
+ "Openness": "Closed",
452
+ "Thinking": false,
453
+ "Version": "v1.0.0",
454
+ "Agent": "Web-Agent",
455
+ "Source": ""
456
+ },
457
+ {
458
+ "eval_name": "GLM-4-0414",
459
+ "Model": "<a target=\"_blank\" href=\"https://huggingface.co/THUDM/GLM-4-32B-0414\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">GLM-4-0414</a>",
460
+ "Pass@2": 9.02,
461
+ "Pass@1": 7.5,
462
+ "Error@1": 6.3,
463
+ "Provider": "Thudm",
464
+ "Openness": "Open",
465
+ "Thinking": false,
466
+ "Version": "v1.0.0",
467
+ "Agent": "Web-Agent",
468
+ "Source": ""
469
+ },
470
+ {
471
+ "eval_name": "step-fun-2-16k",
472
+ "Model": "<a target=\"_blank\" href=\"https://www.stepfun.com/company#step2\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">step-fun-2-16k</a>",
473
+ "Pass@2": 15.870000000000001,
474
+ "Pass@1": 13.700000000000001,
475
+ "Error@1": 6.09,
476
+ "Provider": "StepFun",
477
+ "Openness": "Closed",
478
+ "Thinking": false,
479
+ "Version": "v1.0.0",
480
+ "Agent": "Web-Agent",
481
+ "Source": ""
482
+ },
483
+ {
484
+ "eval_name": "sense-chat-5",
485
+ "Model": "<a target=\"_blank\" href=\"https://chat.sensetime.com/\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">sense-chat-5</a>",
486
+ "Pass@2": 12.72,
487
+ "Pass@1": 8.48,
488
+ "Error@1": 6.8500000000000005,
489
+ "Provider": "SenseTime",
490
+ "Openness": "Closed",
491
+ "Thinking": false,
492
+ "Version": "v1.0.0",
493
+ "Agent": "Web-Agent",
494
+ "Source": ""
495
+ },
496
+ {
497
+ "eval_name": "360-gpt2-o1",
498
+ "Model": "<a target=\"_blank\" href=\"https://aiplus.360.cn/tags/360gpt2-o1.html\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">360-gpt2-o1</a>",
499
+ "Pass@2": 14.46,
500
+ "Pass@1": 8.260000000000002,
501
+ "Error@1": 7.39,
502
+ "Provider": 360,
503
+ "Openness": "Closed",
504
+ "Thinking": false,
505
+ "Version": "v1.0.0",
506
+ "Agent": "Web-Agent",
507
+ "Source": ""
508
+ },
509
+ {
510
+ "eval_name": "minimax-text",
511
+ "Model": "<a target=\"_blank\" href=\"https://huggingface.co/MiniMaxAI/MiniMax-Text-01\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">minimax-text</a>",
512
+ "Pass@2": 10.76,
513
+ "Pass@1": 8.48,
514
+ "Error@1": 6.2,
515
+ "Provider": "MiniMax",
516
+ "Openness": "Open",
517
+ "Thinking": false,
518
+ "Version": "v1.0.0",
519
+ "Agent": "Web-Agent",
520
+ "Source": ""
521
+ },
522
+ {
523
+ "eval_name": "moonshot-kimi-latest",
524
+ "Model": "<a target=\"_blank\" href=\"https://huggingface.co/moonshotai\" style=\"color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;\">moonshot-kimi-latest</a>",
525
+ "Pass@2": 11.85,
526
+ "Pass@1": 5.220000000000001,
527
+ "Error@1": 10.22,
528
+ "Provider": "MoonshotAI",
529
+ "Openness": "Open",
530
+ "Thinking": false,
531
+ "Version": "v1.0.0",
532
+ "Agent": "Web-Agent",
533
+ "Source": ""
534
+ }
535
+ ]
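Each entry above is a flat JSON object: "Pass@2", "Pass@1", and "Error@1" carry the scores, while the remaining keys are display metadata for the leaderboard table. A minimal sketch of loading and ranking such a file (the filename leaderboard_data.json is illustrative, not a path confirmed by this commit):

    import json

    # Hypothetical local copy of the JSON array shown above
    with open("leaderboard_data.json") as f:
        entries = json.load(f)

    # Rank by Pass@2, the leaderboard's headline metric
    for row in sorted(entries, key=lambda r: r["Pass@2"], reverse=True)[:5]:
        print(f"{row['eval_name']:<28} Pass@2={row['Pass@2']:6.2f} Pass@1={row['Pass@1']:6.2f}")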
src/submission/check_validity.py ADDED
@@ -0,0 +1,84 @@
+ import json
+ import os
+ from collections import defaultdict
+
+ import huggingface_hub
+ from huggingface_hub import ModelCard
+ from huggingface_hub.hf_api import ModelInfo
+ from transformers import AutoConfig
+ from transformers.models.auto.tokenization_auto import AutoTokenizer
+
+ def check_model_card(repo_id: str) -> tuple[bool, str]:
+     """Checks if the model card and license exist and have been filled in."""
+     try:
+         card = ModelCard.load(repo_id)
+     except huggingface_hub.utils.EntryNotFoundError:
+         return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
+
+     # Enforce a minimum amount of card content
+     if len(card.text) < 200:
+         return False, "Please add a description to your model card; it is too short."
+
+     return True, ""
+
+ def is_model_on_hub(model_name: str, token: str = None, trust_remote_code=False, test_tokenizer=False):
+     """Checks if model_name is on the Hub and whether it (and its tokenizer) can be loaded with AutoClasses. Returns (ok, error_message, config)."""
+     try:
+         config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=token)
+         if test_tokenizer:
+             try:
+                 AutoTokenizer.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=token)
+             except ValueError as e:
+                 return (
+                     False,
+                     f"uses a tokenizer which is not in a transformers release: {e}",
+                     None,
+                 )
+             except Exception:
+                 return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
+         return True, None, config
+
+     except ValueError:
+         return (
+             False,
+             "needs to be launched with `trust_remote_code=True`. For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.",
+             None,
+         )
+
+     except Exception:
+         return False, "was not found on the Hub!", None
+
+
+ def get_model_size(model_info: ModelInfo):
+     """Gets the model size (in billions of parameters) from the safetensors metadata, or 0 if it is unavailable."""
+     try:
+         model_size = round(model_info.safetensors["total"] / 1e9, 3)
+     except (AttributeError, TypeError):
+         return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
+
+     size_factor = 8
+     model_size = size_factor * model_size
+     return model_size
+
+ def already_submitted_models(requested_models_dir: str) -> tuple[set[str], dict]:
+     """Gathers the already submitted models (and per-user submission dates) to avoid duplicates."""
+     depth = 1
+     file_names = []
+     users_to_submission_dates = defaultdict(list)
+
+     for root, _, files in os.walk(requested_models_dir):
+         current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
+         if current_depth == depth:
+             for file in files:
+                 if not file.endswith(".json"):
+                     continue
+                 with open(os.path.join(root, file), "r") as f:
+                     info = json.load(f)
+                 file_names.append(info["model"])  # record the model id so submit.py can detect duplicates
+                 # Select organisation
+                 if info["model"].count("/") == 0 or "submitted_time" not in info:
+                     continue
+                 organisation, _ = info["model"].split("/")
+                 users_to_submission_dates[organisation].append(info["submitted_time"])
+
+     return set(file_names), users_to_submission_dates
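check_model_card and is_model_on_hub are written to be composed by the submission flow, with the returned message fragments prefixed by the model name. A minimal standalone sketch of the two checks (the repo id org/model is purely illustrative):

    from src.submission.check_validity import check_model_card, is_model_on_hub

    repo_id = "org/model"  # hypothetical submission
    card_ok, card_msg = check_model_card(repo_id)
    found, err, config = is_model_on_hub(repo_id, test_tokenizer=True)
    if not (card_ok and found):
        print(card_msg or f"{repo_id} {err}")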
src/submission/submit.py ADDED
@@ -0,0 +1,83 @@
+ import json
+ import os
+ from datetime import datetime, timezone
+
+ from src.display.formatting import styled_error, styled_message, styled_warning
+ from src.envs import API, EVAL_REQUESTS_PATH, QUEUE_REPO
+ from src.submission.check_validity import (
+     already_submitted_models,
+     check_model_card,
+     get_model_size,
+ )
+
+ REQUESTED_MODELS = None
+ USERS_TO_SUBMISSION_DATES = None
+
+ def add_new_eval(
+     model: str,
+     base_model: str,
+ ):
+     global REQUESTED_MODELS
+     global USERS_TO_SUBMISSION_DATES
+     if not REQUESTED_MODELS:
+         REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
+
+     user_name = ""
+     model_path = model
+     if "/" in model:
+         user_name = model.split("/")[0]
+         model_path = model.split("/")[1]
+
+     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+     # Is the model info correctly filled?
+     try:
+         model_info = API.model_info(repo_id=model)
+     except Exception:
+         return styled_error("Could not get your model information. Please fill it in properly.")
+
+     model_size = get_model_size(model_info=model_info)
+
+     modelcard_OK, error_msg = check_model_card(model)
+     if not modelcard_OK:
+         return styled_error(error_msg)
+
+     # Seems good, creating the eval
+     print("Adding new eval")
+
+     eval_entry = {
+         "model": model,
+         "base_model": base_model,
+         "params": model_size,  # record the computed size with the request
+         "status": "PENDING",
+         "submitted_time": current_time,
+         "private": False,
+     }
+
+     # Check for duplicate submission
+     if f"{model}" in REQUESTED_MODELS:
+         return styled_warning("This model has already been submitted.")
+
+     print("Creating eval file")
+     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+     os.makedirs(OUT_DIR, exist_ok=True)
+     out_path = f"{OUT_DIR}/{model_path}_eval_request_False.json"
+
+     with open(out_path, "w") as f:
+         f.write(json.dumps(eval_entry))
+
+     print("Uploading eval file")
+     API.upload_file(
+         path_or_fileobj=out_path,
+         path_in_repo=out_path.split("eval-queue/")[1],
+         repo_id=QUEUE_REPO,
+         repo_type="dataset",
+         commit_message=f"Add {model} to eval queue",
+     )
+
+     # Remove the local file
+     os.remove(out_path)
+
+     return styled_message(
+         "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
+     )
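In the leaderboard app, add_new_eval would typically be wired to a Gradio submit button that displays the returned styled message. A minimal sketch of such wiring (this UI code is an assumption, not part of this commit):

    import gradio as gr

    from src.submission.submit import add_new_eval

    with gr.Blocks() as demo:
        model_box = gr.Textbox(label="Model name (org/model)")
        base_box = gr.Textbox(label="Base model (optional)")
        result = gr.Markdown()  # renders the styled success/warning/error message
        gr.Button("Submit").click(add_new_eval, [model_box, base_box], result)

    demo.launch()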