Blanca committed on
Commit 95df290 · verified · 1 Parent(s): f07ce30

Update app.py

Files changed (1):
  app.py +6 -7
app.py CHANGED
@@ -136,7 +136,7 @@ Your final output should consist of only one of the following:
 Do not include any explanation, reasoning, or additional text in your output."""}
 
 def call_start():
-    return format_warning("We are starting your evaluation. This can take a few minutes.")
+    return format_log("We are starting your evaluation. This can take a few minutes.")
 
 def add_new_eval(
     model: str,
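
Note: format_warning and format_log are not defined in this hunk. In leaderboard Spaces they are typically small helpers that wrap a message in styled HTML; the following is a sketch of that assumed convention, not this repo's actual definitions.

# Hypothetical sketch of the format_* helpers referenced above (assumption:
# they follow the usual leaderboard-Space pattern of returning styled HTML).
def format_warning(msg: str) -> str:
    # Orange is the common convention for warnings.
    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{msg}</p>"

def format_log(msg: str) -> str:
    # Green for neutral status messages, which is why this commit downgrades
    # the "starting your evaluation" notice from a warning to a log.
    return f"<p style='color: green; font-size: 20px; text-align: center;'>{msg}</p>"
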
@@ -167,7 +167,6 @@ def add_new_eval(
     if not "@" in parsed_mail:
         return format_warning("Please provide a valid email adress.")
 
-    print("Adding new eval")
 
     # Check if the combination model/org already exists and prints a warning message if yes
     if model.lower() in set([m.lower() for m in eval_results[val_or_test]["model"]]) and organisation.lower() in set([o.lower() for o in eval_results[val_or_test]["organisation"]]):
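
For reference, the duplicate check kept above is case-insensitive but tests model and organisation independently rather than as a pair. A self-contained illustration with made-up data:

# Standalone illustration of the duplicate check above; the data is invented.
eval_results = {"validation": {"model": ["GPT-4"], "organisation": ["ACME"]}}
val_or_test = "validation"
model, organisation = "gpt-4", "acme"

already_exists = (
    model.lower() in {m.lower() for m in eval_results[val_or_test]["model"]}
    and organisation.lower() in {o.lower() for o in eval_results[val_or_test]["organisation"]}
)
print(already_exists)  # True -> the submission would trigger the warning path
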
@@ -260,11 +259,11 @@ def add_new_eval(
             label = 'evaluation_issue'
         print(winner, flush=True)
 
-        print(label, flush=True)
+        #print(label, flush=True)
         if label == 'Useful':
             intervention_score += 1/3
 
-        print(id_to_eval, intervention_score, flush=True)
+        #print(id_to_eval, intervention_score, flush=True)
         scores.append(intervention_score)
 
         scored_file.write(
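
Each 'Useful' label adds 1/3 to intervention_score, which suggests three judge labels per intervention so that the score lands in [0, 1]; the enclosing loop is not shown in this diff. A worked example under that assumption:

# Worked example of the scoring increment above. Assumption: three judge
# labels per intervention, so intervention_score ranges over [0, 1].
labels = ["Useful", "Useful", "evaluation_issue"]
intervention_score = 0.0
for label in labels:
    if label == 'Useful':
        intervention_score += 1/3
print(round(intervention_score, 3))  # 0.667
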
@@ -282,12 +281,12 @@ def add_new_eval(
 
 
     #return format_error(score)
-    score = sum(scores)/len(scores)*10
-    print(score, flush=True)
+    score = sum(scores)/len(scores)*100
+    #print(score, flush=True)
 
 
 
-    print(task_ids, flush=True)
+    #print(task_ids, flush=True)
 
     # Check if there's any duplicate in the submission
    if len(task_ids) != len(set(task_ids)):
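
The one functional change in this hunk is the scale of the final score: the mean per-intervention score is now multiplied by 100 instead of 10, moving the reported value from a 0-10 to a 0-100 scale. For example, with illustrative values:

# Effect of the *10 -> *100 change on the reported score (invented inputs).
scores = [1.0, 2/3, 1/3]
mean = sum(scores) / len(scores)  # 2/3
print(round(mean * 10, 1))   # 6.7  (old 0-10 scale)
print(round(mean * 100, 1))  # 66.7 (new 0-100 scale)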
 