Commit Diff


commit - 47579d09e10f17a52a594ced9f8342a8b906117b
commit + fdfa239b5dc646468336c072ad1e355cf3e49af6
blob - 1041a451519402fee46759caecdac8b6dee1ea40
blob + 99e105779faa4c205ddebc010b5d47ba95dcde8b
--- configuration.py
+++ configuration.py
@@ -11,32 +11,64 @@ OLLAMA_URL = os.getenv('RAG_OLLAMA_SERVER_URL', "http:
 OLLAMA_MODEL = os.getenv('RAG_OLLAMA_MODEL', "llama3")
 OLLAMA_EMBEDDINGS_MODEL = os.getenv('RAG_OLLAMA_EMBEDDINGS_MODEL', "nomic-embed-text")
 
+# Enable/disable query rephrasing
+ENABLE_QUERY_REPHRASING = os.getenv('RAG_ENABLE_QUERY_REPHRASING', 'true').lower() == 'true'
+
 # prompt templates
+#SYSTEM_PROMPT: str = r"""
+#* You are acting as a sparring partner for a roleplaying game master.
+#* Do not include introductory or closing remarks.
+#* Use Markdown for all formatting (e.g., bold, italics, code blocks, lists, links).
+#* If the question is unclear, ask clarifying questions.
+#
+#IMPORTANT: 
+#Each context source starts with a line with `Source n:` where `n` is the source number.
+#- Always add inline citations using the source number, example: `[n]`.
+#- Every answer should include at least one source citation.
+#- Only cite a source when you are explicitly referencing it.
+#- If none of the sources are helpful, you should indicate that.
+#"""
+
 SYSTEM_PROMPT: str = r"""
-* You are acting as a sparing partner for a roleplaying game master.
-* Do not include introductory or closing remarks.
-* Use Markdown for all formatting (e.g., bold, italics, code blocks, lists, links).
-* If the question is unclear, ask clarifying questions.
+You are an expert assistant that provides well-sourced answers.
+Use Markdown for all formatting (e.g., bold, italics, code blocks, lists, links).
 
-IMPORTANT: 
-- Please provide an answer based solely on the provided sources.
-- Reference citations using the source(s) number.
-- Every answer should include at least one source citation.
-- Only cite a source when you are explicitly referencing it.
-- If none of the sources are helpful, you should indicate that.
+# CITATION REQUIREMENTS:
+* After **EVERY** statement, add `[n]` where n is the source number it was derived from
+* Source numbers correspond to the numbered sources below
+* For answers supported by multiple sources, use `[1][2][3]` format
+* Never cite sources that don't exist in the provided context
+* If information spans multiple sentences, cite after each relevant sentence
+
+## EXAMPLE OUTPUT FORMAT:
+The renewable energy sector has grown significantly [1]. Solar power installations increased by 20% last year [2]. Wind energy also showed strong growth [1][3].
 """
+
 HUMAN_TEMPLATE: str = r"""
----
-Below are several numbered sources:
+# PROVIDED SOURCES:
 {context}
----
-Additionally, here is the conversation history:
+
+# CONVERSATION HISTORY:
 {history}
----
-Answer the question based on the above context and history: {question}
+
+# USER QUESTION:
+{question}
+
+# ANSWER WITH PROPER CITATIONS:
 """
 
+#HUMAN_TEMPLATE: str = r"""
+#---
+#Below are several numbered sources:
+#{context}
+#---
+#Additionally, here is the conversation history:
+#{history}
+#---
+#Answer the question based on the above context and history: {question}
+#"""
 
+
 def embeddings():
     embeddings = OllamaEmbeddings(
         base_url=OLLAMA_URL,
blob - 5847c1a9ceefd25689698578aa054c029e6b9664
blob + 46ac5804929253742081a57fe74fa51e3e116225
--- rag_backend.py
+++ rag_backend.py
@@ -26,11 +26,11 @@ class RagBackend:
 
     def query(self, query_text: str, history: str) -> (str, list[str]):
         context_docs = sorted(self.db.similarity_search_with_score(query_text, k=5),
-                              key=lambda x: x[1], reverse=False)
+                              key=lambda x: x[1], reverse=True)
 
         # Format context from filtered documents
         context_text = "\n\n---\n".join([
-            f"Source {i + 1}:\n{doc.page_content}"
+            f"[{i + 1}]:\n{doc.page_content}"
             for i, (doc, _score) in enumerate(context_docs)
         ])
 
@@ -44,9 +44,9 @@ class RagBackend:
             SystemMessage(content=SYSTEM_PROMPT),
             HumanMessagePromptTemplate.from_template(HUMAN_TEMPLATE)])
         prompt = prompt_template.format(context=context_text, history=history, question=query_text)
-        print("================")
-        print(prompt)
-        print("================")
+        # print("================")
+        # print(prompt)
+        # print("================")
         response_text = self.model.invoke(prompt)
         # print("----------------")
         # print(response_text)
blob - c7d5676cb055baf2d1c4a7350e8a10b5f8521ec6
blob + 2681329d51ff47e981427b95421382f242729275
--- rag_interface.py
+++ rag_interface.py
@@ -49,4 +49,4 @@ def handle_mcp():
 
 
 if __name__ == '__main__':
-    app.run(debug=False)
+    app.run(host="0.0.0.0", port="5555", debug=False)