Update app.py
app.py CHANGED
@@ -84,18 +84,15 @@ class DocumentProcessor:
         return self.vectordb_doc
 
 
-    def parse_output(self,
-
-
-
-
-
-
-
-
-
-        # result = self.llm.invoke(message)
-        return result
+    def parse_output(self, response):
+        # Find the index where "Question:" starts
+        question_index = response.find("Question:")
+        # Get all text including and after "Question:"
+        if question_index != -1:
+            result_text = response[question_index:].strip()
+            return result_text
+        else:
+            return "I apologize, I don't know the answer"
 
     def document_chain(self):
         # prompt = ChatPromptTemplate.from_template("""Answer the following question based only on the provided context:
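The added parse_output simply slices the chain output at the first "Question:" marker and falls back to an apology when the marker is absent. A minimal standalone sketch of that behavior (the sample response string below is invented for illustration):

def parse_output(response):
    # Return everything from the first "Question:" marker onward,
    # or an apology when the marker is missing.
    question_index = response.find("Question:")
    if question_index != -1:
        return response[question_index:].strip()
    return "I apologize, I don't know the answer"

sample = "Context dump...\nQuestion: what is title\nAnswer: ..."
print(parse_output(sample))            # -> "Question: what is title\nAnswer: ..."
print(parse_output("no marker here"))  # -> "I apologize, I don't know the answer"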
@@ -123,7 +120,7 @@ class DocumentProcessor:
     def get_response(self, retrieval_chain, message):
         # response = retrieval_chain.invoke({"input": "how can langsmith help with testing?"})
         response = retrieval_chain.invoke({"input": message})
-        print(response["answer"])
+        # print(response["answer"])
         return response["answer"]
 
 
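get_response indexes response["answer"], which matches the dict that a LangChain retrieval chain built with create_retrieval_chain returns from invoke(). A purely illustrative stand-in (the key names follow that convention; all values here are made up):

# Illustrative only: the shape of a retrieval chain's invoke() result.
response = {
    "input": "what is title",
    "context": ["<retrieved Document objects>"],
    "answer": "Question: what is title\nAnswer: ...",
}
print(response["answer"])  # exactly what get_response returns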
@@ -155,7 +152,7 @@ def echo(message, history, processor):
 
         chain_result = processor.get_response(reterival_chain, message)
         parsed_result = processor.parse_output(chain_result)
-        return
+        return parsed_result
 
     except Exception as e:
         # Handle any exceptions that occur during execution
@@ -203,9 +200,10 @@ def main():
     def process_echo(message, history):
         return echo(message, history, processor)
 
-    gr.ChatInterface(fn=process_echo, examples=["what is title", "what is summary", "
+    gr.ChatInterface(fn=process_echo, examples=["what is title", "what is summary", "create notes"])
+
+    gr.Markdown("* Note: The answers can be incorrect due to the smaller model")
 
-    gr.Markdown("[Note: The answers can be incorrect as we are using smaller model]")
 
     # with gr.Blocks() as demo:
     #     gr.ChatInterface(fn=echo, examples=["what is title", "what is summary", "merhaba"], title="chat with your data")
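Going by the commented-out lines above, the gr.ChatInterface and gr.Markdown calls presumably sit inside a gr.Blocks() context in main(). A self-contained sketch of that wiring, with a placeholder in place of the real echo/processor pipeline:

import gradio as gr

def process_echo(message, history):
    # Placeholder for echo(message, history, processor) from app.py
    return f"echo: {message}"

with gr.Blocks() as demo:
    gr.ChatInterface(fn=process_echo,
                     examples=["what is title", "what is summary", "create notes"])
    gr.Markdown("* Note: The answers can be incorrect due to the smaller model")

demo.launch()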