From b91272964287faebe92f68b4b8e394c44d2c83d5 Mon Sep 17 00:00:00 2001
From: "S.P. Mohanty" <spmohanty91@gmail.com>
Date: Tue, 19 Mar 2024 02:44:06 +0000
Subject: [PATCH] update local_evaluation.py

---
 local_evaluation.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/local_evaluation.py b/local_evaluation.py
index 2e0196b..6ab525a 100644
--- a/local_evaluation.py
+++ b/local_evaluation.py
@@ -57,9 +57,8 @@ def generate_model_outputs(data_df, model):
         data_df.iterrows(), total=len(data_df), desc="Generating Responses"
     ):
         is_multiple_choice = row["task_type"] == "multiple-choice"
-        # the 'task_type' column won't be available during evaluation
-        # please consistently use just the `is_multiple_choice` parameter
-        # passed to the `.predict`method.`
+        # the 'task_type' column won't be available during evaluation, so you should use something like
+        # `is_multiple_choice = row['is_multiple_choice']`
         prompt = row["input_field"]
         model_output = model.predict(prompt, is_multiple_choice)
         outputs.append(model_output)
-- 
GitLab