Update app.py
app.py CHANGED
@@ -154,7 +154,10 @@ def add_new_eval(
     with open(file_path, 'r') as f:
         data = json.load(f)
         for ix, line in data.items():
-            return format_error(
+            return format_error(os.getcwd())
+
+            # TODO: look at each question, compare it to the references, output a label between 0 and 1
+
             #try:
             #    task = json.loads(line)
             #except Exception:
@@ -173,7 +176,7 @@ def add_new_eval(
             scored_file.write(
                 json.dumps({
                     "id": task_id,
-                    "model_answer": answer,
+                    #"model_answer": answer,
                     "score": score
                 }) + "\n"
             )
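
The TODO added in the first hunk asks the loop to compare each question's model answer against the reference(s) and output a label between 0 and 1. One common way to produce such a label (a sketch only, not necessarily what this app will adopt) is best token-level F1 against the references, as in SQuAD-style scoring. The function name score_answer, and the assumption that references arrive as a list of strings, are illustrative:

import re
import string
from collections import Counter

def normalize(text: str) -> str:
    # Lowercase, drop punctuation and articles, collapse whitespace.
    text = text.lower()
    text = "".join(ch for ch in text if ch not in string.punctuation)
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

def score_answer(model_answer: str, references: list[str]) -> float:
    # Return a label in [0, 1]: the best token-level F1 overlap between
    # the model answer and any single reference answer.
    # (Hypothetical helper; field/variable names are assumptions, not from app.py.)
    best = 0.0
    pred_tokens = normalize(model_answer).split()
    for ref in references:
        ref_tokens = normalize(ref).split()
        common = Counter(pred_tokens) & Counter(ref_tokens)
        overlap = sum(common.values())
        if overlap == 0:
            continue
        precision = overlap / len(pred_tokens)
        recall = overlap / len(ref_tokens)
        best = max(best, 2 * precision * recall / (precision + recall))
    return best

An exact-match comparison would be the degenerate case of the same idea (label 1.0 if the normalized strings are equal, 0.0 otherwise); the commit itself leaves the choice open.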
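The second hunk comments out the "model_answer" field, so each line of the scored file now carries only "id" and "score" in JSONL form. A small sketch of consuming that file, with the file name scored.jsonl assumed purely for illustration:

import json

# Read the scored results back; after this change each JSONL line only
# contains "id" and "score" (the "model_answer" field is commented out).
with open("scored.jsonl", "r") as scored_file:
    results = [json.loads(line) for line in scored_file]

average_score = sum(r["score"] for r in results) / len(results) if results else 0.0
print(f"{len(results)} tasks, mean score {average_score:.3f}")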