From acfd2ddb8af15c12e2795091ecca529808013d2e Mon Sep 17 00:00:00 2001
From: "S.P. Mohanty" <spmohanty91@gmail.com>
Date: Mon, 1 Apr 2024 15:59:10 +0000
Subject: [PATCH] update interface documentation

---
 models/README.md      |  3 +--
 models/dummy_model.py | 43 +++++++++++++++++++++++++++++++++----------
 2 files changed, 34 insertions(+), 12 deletions(-)

diff --git a/models/README.md b/models/README.md
index e85d82f..e72366f 100644
--- a/models/README.md
+++ b/models/README.md
@@ -15,10 +15,9 @@ To ensure your model is recognized and utilized correctly, please specify your m
 Your model will receive two pieces of information for every task:
 - `query`: String representing the input query
 - `search_results`: List of strings, each comes from scraped HTML text of the search query.
-- `character_limit`: The maximum character limit for the answer (can vary per query)
 
 ### Outputs
 The output from your model's `generate_answer` function should always be a string.
 
 ## Internet Access
-Your model will not have access to the internet during evaluation. Your model will have access to LLaMa 2 weights. **[TO BE ADDED]**.
\ No newline at end of file
+Your model will not have access to the internet during evaluation.
\ No newline at end of file
diff --git a/models/dummy_model.py b/models/dummy_model.py
index 8dcb9a5..5abbf30 100644
--- a/models/dummy_model.py
+++ b/models/dummy_model.py
@@ -1,21 +1,44 @@
+import os
 from typing import List
 
+# Load the environment variable that specifies the URL of the MockAPI. This URL is essential
+# for accessing the correct API endpoint in Task 2 and Task 3. The value of this environment variable
+# may vary across different evaluation settings, emphasizing the importance of dynamically obtaining
+# the API URL to ensure accurate endpoint communication.
+#
+# **Note**: This environment variable will not be available for Task 1 evaluations.
+CRAG_MOCK_API_URL = os.getenv("CRAG_MOCK_API_URL", "http://localhost:8000")
 
 class DummyModel:
     def __init__(self):
-        """ Initialize your models here """
+        """
+        Initialize your model(s) here if necessary.
+        This is the constructor for your DummyModel class, where you can set up any
+        required initialization steps for your model(s) to function correctly.
+        """
         pass
 
     def generate_answer(self, query: str, search_results: List[str]) -> str:
         """
-        You will be provided with a query and the corresponding pre-cached search results for the query
-        
-        Inputs - 
-            query - String representing the input query
-            search_results - List of strings, each comes from scraped HTML text of the search query
-        Returns - 
-            string response - Your answer in plain text, should be limited to the character limit, 
-                              Any longer responses will be trimmed to meet the character limit
+        Generate an answer based on a provided query and a list of pre-cached search results.
+
+        Parameters:
+        - query (str): The user's question or query input.
+        - search_results (List[str]): A list containing the text content from web pages
+          retrieved as search results for the query. Each element in the list is a string
+          representing the HTML text of a web page.
+
+        Returns:
+        - (str): A plain text response that answers the query. This response is limited to 75 tokens.
+          If the generated response exceeds 75 tokens, it will be truncated to fit within this limit.
+
+        Notes:
+        - If the correct answer is uncertain, it's preferable to respond with "I don't know" to avoid
+          the penalty for hallucination.
+        - Response Time: Ensure that your model processes and responds to each query within 10 seconds.
+          Failing to adhere to this time constraint **will** result in a timeout during evaluation.
         """
+        # Default response when unsure about the answer
         answer = "i don't know"
-        return answer
\ No newline at end of file
+
+        return answer
-- 
GitLab