From 21bd06d36d231797cc5c196cc573ea321f50885a Mon Sep 17 00:00:00 2001
From: Silin <silin.gao@epfl.ch>
Date: Sun, 10 Dec 2023 08:06:53 +0000
Subject: [PATCH] Update local_evaluation_with_api.py, add multi-message
 prompt.

---
 local_evaluation_with_api.py | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/local_evaluation_with_api.py b/local_evaluation_with_api.py
index a339e3c..be284f6 100644
--- a/local_evaluation_with_api.py
+++ b/local_evaluation_with_api.py
@@ -38,12 +38,20 @@ class LLM_API:
         self.model = "gpt-3.5-turbo-1106"
 
     def api_call(self, prompt, max_tokens):
-        """ Simple single prompt api call """
-        response = self.client.chat.completions.create(
-            model=self.model,
-            messages=[{"role": "user", "content": prompt}],
-            max_tokens=max_tokens,
-        )
+        """Call the chat API with a str or list-of-messages prompt."""
+        if isinstance(prompt, str):  # Single-message prompt
+            messages = [{"role": "user", "content": prompt}]
+        elif isinstance(prompt, list):  # Multi-message prompt
+            messages = prompt
+        else:
+            raise TypeError(
+                f"prompt must be str or list, got {type(prompt).__name__}"
+            )
+        response = self.client.chat.completions.create(
+            model=self.model,
+            messages=messages,
+            max_tokens=max_tokens,
+        )
         response_text = response.choices[0].message.content
         input_tokens = response.usage.prompt_tokens
         output_tokens = response.usage.completion_tokens
-- 
GitLab