# (Hugging Face Spaces status banner — non-code extraction residue, kept as a comment)
| from dotenv import load_dotenv | |
| import os | |
| from docx import Document | |
| from llama_index.llms.together import TogetherLLM | |
| from llama_index.core.llms import ChatMessage, MessageRole | |
| from Bio import Entrez | |
| import ssl | |
| from transformers import AutoModelForSequenceClassification, AutoTokenizer | |
| import streamlit as st | |
| from googleapiclient.discovery import build | |
| from typing import List, Optional | |
load_dotenv()

# SECURITY: a hard-coded API key was previously committed here in a comment;
# it has been removed. Rotate that key if this file was ever shared publicly.

# Re-export the keys into os.environ so downstream clients that read
# os.environ[...] directly can find them. Fail fast with a clear message
# instead of the opaque TypeError os.environ raises when given None.
for _key in ("TOGETHER_API", "GOOGLE_SEARCH_API_KEY"):
    _val = os.getenv(_key)
    if _val is None:
        raise RuntimeError(f"Environment variable {_key!r} is not set; add it to your .env file.")
    os.environ[_key] = _val
def search_pubmed(query: str) -> Optional[List[str]]:
    """Search PubMed for *query* and return up to three formatted results.

    Each result is a markdown string containing the title, abstract and a
    link to the article. Returns None when nothing was found or the lookup
    failed entirely; malformed individual records are skipped.
    """
    Entrez.email = "harisellahi888@gmail.com"  # Required by NCBI etiquette; replace with your own.
    try:
        # SECURITY NOTE: this disables TLS certificate verification for the
        # whole process — a workaround for broken local cert stores. Prefer
        # fixing the certifi installation instead.
        ssl._create_default_https_context = ssl._create_unverified_context

        handle = Entrez.esearch(db="pubmed", term=query, retmax=3)
        try:
            record = Entrez.read(handle)
        finally:
            handle.close()  # BUG FIX: handles were previously never closed.
        id_list = record["IdList"]
        if not id_list:
            return None

        handle = Entrez.efetch(db="pubmed", id=id_list, retmode="xml")
        try:
            articles = Entrez.read(handle)
        finally:
            handle.close()

        results = []
        for article in articles['PubmedArticle']:
            try:
                medline_citation = article['MedlineCitation']
                article_data = medline_citation['Article']
                title = article_data['ArticleTitle']
                # The abstract section may be missing; fall back to "".
                abstract = article_data.get('Abstract', {}).get('AbstractText', [""])[0]
                result = f"**Title:** {title}\n**Abstract:** {abstract}\n"
                result += f"**Link:** https://pubmed.ncbi.nlm.nih.gov/{medline_citation['PMID']} \n\n"
                results.append(result)
            except KeyError as e:
                # Skip malformed records but keep whatever parsed cleanly.
                print(f"Error parsing article: {article}, Error: {e}")
        return results
    except Exception as e:
        # Network / parse failures degrade to "no results" for the caller.
        print(f"Error accessing PubMed: {e}")
        return None
def chat_with_pubmed(article_text, article_link):
    """Discuss/summarize a PubMed article through a TogetherLLM chat call.

    Returns the model's reply as a string, or a fixed fallback message when
    the call produces nothing or raises.
    """
    # NOTE(review): the model id casing ("QWEN/QWEN1.5-14B-CHAT") differs from
    # Together's published "Qwen/Qwen1.5-14B-Chat"; confirm the endpoint
    # accepts it — left unchanged here.
    try:
        llm = TogetherLLM(model="QWEN/QWEN1.5-14B-CHAT", api_key=os.environ['TOGETHER_API'])
        conversation = [
            ChatMessage(
                role=MessageRole.SYSTEM,
                content="You are a helpful AI assistant summarizing and answering questions about the following medical research article: " + article_link,
            ),
            ChatMessage(role=MessageRole.USER, content=article_text),
        ]
        reply = llm.chat(conversation)
        if reply:
            return str(reply)
        return "I'm sorry, I couldn't generate a summary for this article."
    except Exception as e:
        print(f"Error in chat_with_pubmed: {e}")
        return "An error occurred while generating a summary."
def search_web(query: str, num_results: int = 3, cx: str = "e31a5857f45ef4d2a") -> Optional[List[str]]:
    """Search the web via the Google Custom Search API.

    Args:
        query: Search terms.
        num_results: How many results to request (the API caps this at 10).
        cx: Custom Search Engine id. Defaults to the project's engine so
            existing callers are unaffected (previously hard-coded).

    Returns:
        A list of markdown-formatted result strings, or None when there are
        no results or the request failed.
    """
    try:
        service = build("customsearch", "v1", developerKey=os.environ["GOOGLE_SEARCH_API_KEY"])
        # Execute the search request.
        res = service.cse().list(q=query, cx=cx, num=num_results).execute()
        if "items" not in res:
            return None
        results = []
        for item in res["items"]:
            title = item["title"]
            link = item["link"]
            snippet = item["snippet"]
            results.append(f"**Title:** {title}\n**Link:** {link} \n**Snippet:** {snippet}\n\n")
        return results
    except Exception as e:
        # Quota errors, network failures etc. degrade to "no results".
        print(f"Error performing web search: {e}")
        return None
| from together import Together | |
def medmind_chatbot(user_input, chat_history=None):
    """Answer a health-related query using PubMed + web search + an LLM.

    Args:
        user_input: The user's question.
        chat_history: List of (question, answer) tuples. A fresh list is
            created when None, so no mutable default is shared across calls.

    Returns:
        (final_response, chat_history) — the generated answer and the
        updated history. The new turn is appended even when a step failed,
        in which case final_response is a fixed fallback message.
    """
    if chat_history is None:
        chat_history = []
    final_response = ""
    try:
        response_parts = []  # Evidence collected from each source.

        # PubMed search.
        pubmed_results = search_pubmed(user_input)
        if pubmed_results:
            for article_text in pubmed_results:
                # Each formatted result begins with title/abstract/link lines.
                # NOTE(review): a multi-line abstract would shift this split;
                # tolerable since only the first three lines feed the prompt.
                title, abstract, link = article_text.split("\n")[:3]
                response_parts.append(f"{title}\n{abstract}\n{link}\n")
        else:
            response_parts.append("No relevant PubMed articles found.")

        # Web search.
        web_results = search_web(user_input)
        if web_results:
            response_parts.append("\n\n**Web Search Results:**")
            response_parts.extend(web_results)
        else:
            response_parts.append("No relevant web search results found.")

        # Combine response parts into the retrieval context for the LLM.
        response_text = "\n\n".join(response_parts)
        prompt = f"""You are a Health Assistant AI designed to provide detailed responses to health-related questions.
Based on the information retrieved from the PubMed and Web Search below, answer the user's query appropriately.
- If the user's query is health-related, provide a detailed and helpful response based on the retrieved information. Or if there is
some previous conversation then answer the health by seeing the previous conversation also.
- If the query is a general greeting (e.g., 'Hello', 'Hi'), respond as a friendly assistant.
- If the query is irrelevant or unrelated to health, respond with: 'I am a health assistant. Please ask only health-related questions.'
- Don't mention in response that where you reterived the information.
Previous Conversation:
{chat_history}
User's Query: {user_input}
Information retrieved from PubMed and Web Search:
{response_text}
Your response:"""

        client = Together(api_key=os.environ.get('TOGETHER_API'))
        response = client.chat.completions.create(
            model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
            messages=[{"role": "user", "content": prompt}],
        )
        final_response = response.choices[0].message.content
    except Exception as e:
        print(f"Error in chatbot: {e}")
        # BUG FIX: the original assigned the fallback to response_text, a
        # variable that is never returned, so callers received "" on any
        # failure. Assign it to the value actually returned.
        final_response = "An error occurred. Please try again later."
    chat_history.append((user_input, final_response))
    return final_response, chat_history
# NOTE(review): removed a module-level smoke-test call — it fired live
# PubMed/Google/Together requests at import time. Run it manually if needed:
# medmind_chatbot("What are the symptoms of COVID-19?")
| import gradio as gr | |
def show_info_popup():
    """Return the static informational blurb describing the HealthHive bot."""
    return """
    **HealthHive is an AI-powered chatbot designed to assist with medical information.**
    ...
    """
def main():
    """Build and launch the Gradio UI for the HealthHive chatbot."""
    with gr.Blocks() as demo:
        gr.Markdown("# HealthHive Chatbot")
        gr.Markdown("Ask your medical questions and get reliable information!")

        # Example Questions (sidebar-style list).
        gr.Markdown("### Example Questions")
        example_questions = [
            "What are the symptoms of COVID-19?",
            "How can I manage my diabetes?",
            "What are the potential side effects of ibuprofen?",
            "What lifestyle changes can help prevent heart disease?",
        ]
        for question in example_questions:
            gr.Markdown(f"- {question}")

        # Chat history state and user input.
        with gr.Row():
            user_input = gr.Textbox(label="You:", placeholder="Type your medical question here...", lines=2)
        chat_history = gr.State([])

        # Output container.
        with gr.Row():
            response = gr.Textbox(label="HealthHive:", placeholder="Response will appear here...", interactive=False, lines=10)

        def on_submit(user_input, chat_history):
            # BUG FIX: previously returned three values (result, history,
            # info) while only two outputs were wired to the Submit button,
            # causing a Gradio output-count mismatch. Return exactly the two
            # wired outputs.
            result, updated_history = medmind_chatbot(user_input, chat_history)
            return result, updated_history

        def start_new_chat():
            # BUG FIX: "Start New Chat" previously cleared only the two
            # textboxes, leaving stale conversation state in gr.State; reset
            # the history as well.
            return "", "", []

        gr.Button("Submit").click(on_submit, inputs=[user_input, chat_history], outputs=[response, chat_history])
        gr.Button("Start New Chat").click(start_new_chat, outputs=[user_input, response, chat_history])

    demo.launch()


if __name__ == "__main__":
    main()