@@ -185,28 +185,29 @@ def render_mds(overwrite_previous, subfolder="docs"):
         "lite": 3628,
         "all": 140926,
     }  # hard-coded to skip running it later
-    for split in tqdm.tqdm(["lite", "all"]):
+    for split in ["lite", "all"]:
         num_repos = len(SPLIT[split])
         # total_num_tests = 0
         # for repo_name in SPLIT[split]:
         #     repo_tests = subprocess.run(['commit0', 'get-tests', repo_name], capture_output=True, text=True).stdout.strip()
         #     total_num_tests += len(repo_tests.splitlines())
-        leaderboard[split] = leaderboard_header.format(
+        leaderboard[split] = []
+        leaderboard[split].append((split_to_total_tests[split] + 1, leaderboard_header.format(
             split=split,
             num_repos=num_repos,
             total_num_tests=split_to_total_tests[split],
-        )
+        )))

         for org_path in tqdm.tqdm(glob.glob(os.path.join(analysis_files_path, "*"))):
             org_name = os.path.basename(org_path)
             if org_name in {"blank", "repos", "submission_repos"}:
                 continue
             for branch_path in glob.glob(os.path.join(org_path, "*.json")):
                 cum_tests_passed = 0
-                lite_cum_tests_passed = 0
                 repos_resolved = 0
-                lite_repos_resolved = 0
                 total_duration = 0.0
+                lite_cum_tests_passed = 0
+                lite_repos_resolved = 0
                 lite_total_duration = 0.0
                 branch_metrics = json.load(open(branch_path))
                 submission_info = branch_metrics["submission_info"]
@@ -301,10 +302,11 @@ def render_mds(overwrite_previous, subfolder="docs"):
                     cum_tests_passed += pytest_info["summary"]["passed"]
                     total_duration += pytest_info["duration"]
                     repos_resolved += int(resolved)
-                    if repo_name in SPLIT["lite"]:
+                    if split == "all":
                         lite_cum_tests_passed += pytest_info["summary"]["passed"]
                         lite_total_duration += pytest_info["duration"]
                         lite_repos_resolved += int(resolved)
+
                     if write_submission:
                         pytest_details = f"{pytest_info['summary']['passed']} / {pytest_info['summary']['total']}"
                         duration = f"{pytest_info['duration']:.2f}"
@@ -329,29 +331,33 @@ def render_mds(overwrite_previous, subfolder="docs"):
                         wf.write(back_button + "\n" + submission_page)
                 analysis_link = f"[Analysis](/{f'analysis_{org_name}_{branch_name}'})"
                 github_link = f"[Github]({project_page_link})"
-                leaderboard[split] += (
+                leaderboard[split].append((cum_tests_passed,
                     f"\n|{display_name}|"
                     f"{repos_resolved}|"
                     f"{cum_tests_passed}|"
                     f"{total_duration:.2f}|"
                     f"{submission_date}|"
                     f"{analysis_link}|"
                     f"{github_link}|"
-                )
-                if split == "all":
-                    leaderboard["lite"] += (
-                        f"\n|{display_name}|"
+                ))
+                if ((split == "all") and ("Reference (Gold)" not in display_name)):
+                    leaderboard["lite"].append((lite_cum_tests_passed,
+                        f"\n|{display_name} (subset of `all`)|"
                         f"{lite_repos_resolved}|"
                         f"{lite_cum_tests_passed}|"
                         f"{lite_total_duration:.2f}|"
                         f"{submission_date}|"
                         f"{analysis_link}|"
                         f"{github_link}|"
-                    )
+                    ))

     leaderboard_filepath = os.path.join(subfolder, "analysis.md")
+    for split in ["lite", "all"]:
+        leaderboard[split] = sorted(leaderboard[split], key=lambda elt: -elt[0])
     with open(leaderboard_filepath, "w") as wf:
-        wf.write(leaderboard["lite"] + "\n\n" + leaderboard["all"])
+        lite_leaderboard_string = "".join(string for (_, string) in leaderboard["lite"])
+        all_leaderboard_string = "".join(string for (_, string) in leaderboard["all"])
+        wf.write(lite_leaderboard_string + "\n\n" + all_leaderboard_string)


 def get_args():
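
For context, the change above switches each leaderboard split from one accumulated markdown string to a list of (tests_passed, row) tuples that is sorted in descending order before rendering; the header row is keyed one above the split's total test count so it always stays on top. A minimal standalone sketch of that pattern follows (not part of the commit; the header format and submission values are made up for illustration):

# Hypothetical illustration of the tuple-based leaderboard rendering pattern.
split_to_total_tests = {"lite": 3628, "all": 140926}
leaderboard = {split: [] for split in ["lite", "all"]}

for split in ["lite", "all"]:
    # Header score is one above the split's total, so it always sorts to the top.
    header = f"\n# {split}\n|name|repos resolved|tests passed|"  # placeholder header
    leaderboard[split].append((split_to_total_tests[split] + 1, header))

# Made-up submissions: (display name, tests passed).
for name, passed in [("sub-a", 120), ("sub-b", 3400)]:
    leaderboard["lite"].append((passed, f"\n|{name}|-|{passed}|"))

# Sort each split by score descending, then join only the row strings.
for split in ["lite", "all"]:
    leaderboard[split] = sorted(leaderboard[split], key=lambda elt: -elt[0])

lite_table = "".join(row for (_, row) in leaderboard["lite"])
all_table = "".join(row for (_, row) in leaderboard["all"])
print(lite_table + "\n\n" + all_table)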