commit - 88c51000b72b7e9e8ecf4c7426f65f8a4d960661
commit + 80520ada7d6b1334f5e5d216668130cbbc6326d7
blob - 85ba3ff8203f1ef16d9d030929fce46e8d93aff4
blob + 79042af64f8f42b28088c646fc2a90feb3fbed14
--- configuration.py
+++ configuration.py
IMPORTANT:
* Please provide an answer based solely on the provided sources.
-* When referencing information from a source, add the corresponding source number inline like this: `[n]`.
+* When referencing information from a source, cite the appropriate source(s) using their corresponding numbers.
+* Format citations as `[n]` where `n` is the source's number.
* Do not include any additional explanations or notes in your citation.
* Every answer should include at least one source citation.
* Only cite a source when you are explicitly referencing it.
* If none of the sources are helpful, you should indicate that.
-* At the end of your answer, list the sources you used in the format: `[n] document` for each source.
-* If no sources are cited, omit listing sources at all.
Example:
```
-[1] (document-file-name.pdf:123):
+Source 1:
The sky is red in the evening and blue in the morning.
-[2] (another-document.txt:23):
+Source 2:
Water is wet when the sky is red.
---
Answer the question based on the above context and history: When is water wet?
Water will be wet when the sky is red [2], which occurs in the evening [1].
-
-### Sources:
-- [1] document-file-name.pdf:123
-- [2] another-document.txt:23
```
"""
HUMAN_TEMPLATE: str = r"""
blob - 358efa3af726eb613eed641e5c6483f51b5f6ac5
blob + 6b2eff9b049d4d8bff074861c83f528f85dae2be
--- rag_backend.py
+++ rag_backend.py
self.model = OllamaLLM(base_url=ollama_url, model=model)
def query(self, query_text: str, history: str) -> tuple[str, list[str]]:
- context_docs = self.db.similarity_search_with_score(query_text, k=5)
+ # order documents by score (ascending) so the Source numbering is stable
+ context_docs = sorted(self.db.similarity_search_with_score(query_text, k=5),
+ key=lambda x: x[1], reverse=False)
# Format context from filtered documents
context_text = "\n\n---\n".join([
- f"[{i + 1}] ({':'.join(doc.metadata.get('id').split(':')[:2])}):\n{doc.page_content}" for i,
- (doc, _score) in enumerate(context_docs)])
+ f"Source {i + 1}:\n{doc.page_content}"
+ for i, (doc, _score) in enumerate(context_docs)
+ ])
+ # prepare references for reply
+ references = [
+ f"{':'.join(doc.metadata.get('id').split(':')[:2])}):"
+ for (doc, _score) in context_docs
+ ]
+
prompt_template = ChatPromptTemplate.from_messages([
SystemMessage(content=SYSTEM_PROMPT),
HumanMessagePromptTemplate.from_template(HUMAN_TEMPLATE)])
prompt = prompt_template.format(context=context_text, history=history, question=query_text)
- # print("================")
- # print(prompt)
- # print("================")
+ print("================")
+ print(prompt)
+ print("================")
response_text = self.model.invoke(prompt)
# print("----------------")
# print(response_text)
# print("----------------")
- return response_text
+ return response_text, references
@staticmethod
def load_pdf_documents(path: str) -> list[Document]:
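query() now returns the answer together with a parallel list of reference strings instead of the bare response text. A minimal sketch of mapping the model's inline `[n]` citations back onto that list; the helper name is hypothetical and it assumes the model honours the `[n]` format requested in SYSTEM_PROMPT:
```
import re

def resolve_citations(answer: str, references: list[str]) -> list[str]:
    # Hypothetical helper, not part of this patch. `[n]` is treated as a
    # 1-based index into the references list returned by query().
    cited = sorted({
        int(m.group(1))
        for m in re.finditer(r"\[(\d+)\]", answer)
        if 1 <= int(m.group(1)) <= len(references)
    })
    return [references[n - 1] for n in cited]

# "Water will be wet when the sky is red [2], which occurs in the evening [1]."
# against ["document-file-name.pdf:123", "another-document.txt:23"] yields
# both reference strings.
```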
blob - 6488fa41e5750ac622af34ae8b0c9c30d553a31d
blob + c7d5676cb055baf2d1c4a7350e8a10b5f8521ec6
--- rag_interface.py
+++ rag_interface.py
params = data.get('params', {})
# Dispatch to the correct tool
if method == "llm_chat":
- result = rag.query(params.get('query', ''), params.get('history', ''))
+ result, references = rag.query(params.get('query', ''), params.get('history', ''))
return jsonify({
"jsonrpc": "2.0",
"result": {
- 'text': result
+ 'text': result,
+ 'ref': references,
},
"id": data.get("id")
})
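The JSON-RPC reply now carries the reference list under result.ref alongside the answer text. A minimal client sketch; the endpoint URL and port are assumptions, since only the handler body is shown:
```
import requests

payload = {
    "jsonrpc": "2.0",
    "method": "llm_chat",
    "params": {"query": "When is water wet?", "history": ""},
    "id": 1,
}
# URL is hypothetical; rag_interface.py does not show the route here.
resp = requests.post("http://localhost:5000/mcp", json=payload).json()
print(resp["result"]["text"])  # answer with inline [n] citations
print(resp["result"]["ref"])   # parallel list of "file:page" references
```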
blob - 88329261c0ecff8190c540617f5e4ab7ed601a0c
blob + c49c1f20b0b9dbd878d2247d5404c47bd4f0fd4d
--- templates/page.html
+++ templates/page.html
try {
const result = await sendMCPMessage(text, context_prompt);
+ console.log(result.ref);
appendMessage('bot', result.text);
history.push({question: text, answer: result.text});
} catch (error) {