From eec7e265bd2a6d740ac6a167d09ccd061b12bd0d Mon Sep 17 00:00:00 2001
From: yilun_jin <jyl.jal123@gmail.com>
Date: Mon, 18 Mar 2024 20:59:31 +0000
Subject: [PATCH] Update local_evaluation.py

---
 local_evaluation.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/local_evaluation.py b/local_evaluation.py
index 995cf2f..7c5eb86 100644
--- a/local_evaluation.py
+++ b/local_evaluation.py
@@ -57,6 +57,8 @@ def generate_model_outputs(data_df, model):
         data_df.iterrows(), total=len(data_df), desc="Generating Responses"
     ):
         is_multiple_choice = row["task_type"] == "multiple-choice"
+        # the 'task_type' column won't be available during evaluation, so you should use something like
+        # `is_multiple_choice = row['is_multiple_choice']`
         prompt = row["input_field"]
         model_output = model.predict(prompt, is_multiple_choice)
         outputs.append(model_output)
-- 
GitLab