import streamlit as st
import os
import requests
import time

# Config
CHUNKS_FILE = "chunks.txt"
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
GEMINI_API_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
MAX_CONTEXT_LENGTH = 1000   # characters of retrieved history kept in the prompt
MAX_RESPONSE_LENGTH = 300   # passed to Gemini as maxOutputTokens
# Load chunks for answer mode
def load_chunks(chunks_file):
    chunks = []
    try:
        with open(chunks_file, 'r', encoding='utf-8') as file:
            current_chunk = ""
            for line in file:
                # Lines starting with "Chunk" are headers that delimit chunks;
                # everything between two headers belongs to one chunk.
                if line.startswith("Chunk"):
                    if current_chunk:
                        chunks.append(current_chunk.strip())
                    current_chunk = ""
                else:
                    current_chunk += line
            # Flush the final chunk, which has no trailing header.
            if current_chunk:
                chunks.append(current_chunk.strip())
        return chunks
    except Exception as e:
        st.error(f"⚠️ Error loading chunks: {e}")
        return []
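# A minimal sketch of the chunks.txt layout this parser assumes: "Chunk" header
# lines act as delimiters, and every line in between belongs to one chunk.
#
#   Chunk 1:
#   Customer: My machine shows a HOME RETURN error.
#   Agent: Please power-cycle the unit and re-run calibration.
#   Chunk 2:
#   ...
#
# (The conversation lines above are illustrative, not from a real file.)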
# Search relevant chat chunks by keyword overlap
def search_messages(query, chunks, top_k=3):
    query_words = set(query.lower().split())
    scores = []
    for chunk in chunks:
        chunk_words = set(chunk.lower().split())
        # Score = fraction of the chunk's words that also appear in the query.
        match_count = len(query_words.intersection(chunk_words))
        score = match_count / max(len(chunk_words), 1)
        scores.append((score, chunk))
    # Sort on the score alone so ties don't fall back to comparing chunk text.
    scores.sort(key=lambda pair: pair[0], reverse=True)
    return [chunk for _, chunk in scores[:top_k]]
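# Illustrative example of the scoring: for the query "home return error", a
# chunk reading "HOME RETURN error on MOSTK014_P11" lowercases to 5 distinct
# words, 3 of which match the query, so it scores 3 / 5 = 0.6 and outranks
# chunks with no overlapping words.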
# Generate answer from Gemini
def generate_response(query, chunks):
    try:
        # Truncate the retrieved history so the prompt stays within budget.
        context = "\n".join(chunks)[:MAX_CONTEXT_LENGTH]
        prompt = f"""
You are a professional customer support assistant. You resolve user issues by analyzing previous customer interactions and providing clear, helpful, and empathetic responses.

Instructions:
- Use the provided chat history as your internal knowledge base.
- Do not mention or reference the history directly.
- Understand recurring issues and recognize patterns from similar past cases.
- For the given user query:
  - Greet and acknowledge the concern professionally.
  - Suggest a solution or steps, based on insights from similar historical interactions.
  - If the solution is uncertain, offer best practices or next steps.
  - End with a polite closing and an offer of further help.
- Do not mention past history or previous tickets.

Chat History:
{context}

User Query:
"{query}"

Your Response:
""".strip()
        headers = {
            "Content-Type": "application/json",
            "X-goog-api-key": GEMINI_API_KEY
        }
        data = {
            "contents": [{"parts": [{"text": prompt}]}],
            "generationConfig": {"maxOutputTokens": MAX_RESPONSE_LENGTH}
        }
        response = requests.post(GEMINI_API_URL, headers=headers, json=data)
        response.raise_for_status()
        response_data = response.json()
        return response_data["candidates"][0]["content"]["parts"][0]["text"].strip()
    except Exception as e:
        return f"⚠️ Error generating response: {e}"
# Format instruction based on example
def format_instruction(raw_input):
    try:
        # One-shot example pair that fixes the tone and layout of the output.
        example_input = """MOSTK014_P11
HOME RETURN error"""
        example_output = """Dear @Fa,
Kindly check the issue on MOSTK014_P11 - we are encountering a HOME RETURN FAIL ERROR.
Please assist at your earliest convenience.
Thank you for your support."""
        prompt = f"""
You are a professional support coordinator.
When given a raw technical input, you must convert it into a polite, professional 4-5 line instruction message, similar in tone and format to the example below.

--- EXAMPLE ---
Raw Input:
{example_input}

Formatted Output:
{example_output}
--- END EXAMPLE ---

Rules:
- Always use a greeting and closing.
- Always address the message to "Dear @Fa,".
- Always generate a unique tone, not identical to the template, while keeping the same structure.
- Do not use complex words or sentences.

Now format this new input the same way:

Raw Input:
{raw_input}

Formatted Output:
""".strip()
        headers = {
            "Content-Type": "application/json",
            "X-goog-api-key": GEMINI_API_KEY
        }
        data = {
            "contents": [{"parts": [{"text": prompt}]}],
            "generationConfig": {
                "maxOutputTokens": 200,
                "temperature": 0.7   # some variety so outputs aren't copies of the template
            }
        }
        response = requests.post(GEMINI_API_URL, headers=headers, json=data)
        response.raise_for_status()
        response_data = response.json()
        return response_data["candidates"][0]["content"]["parts"][0]["text"].strip()
    except Exception as e:
        return f"⚠️ Error formatting instruction: {e}"
# Main app
def main():
    st.set_page_config(page_title="Support Assistant", layout="centered")
    st.title("🤖 AssistEdge")
    st.caption("AssistEdge is your intelligent frontline support companion, blending historical insights with real-time AI to deliver clear, empathetic, and professional responses. Whether you're resolving recurring issues or crafting polished technical instructions, AssistEdge keeps the conversation flowing with memory-aware chat and smart formatting.")

    # Mode switcher
    mode = st.radio("Select Mode:", ["Instruction Formatter", "Answer Mode (Chat)"], horizontal=True)

    # Initialize session state
    if "chunks" not in st.session_state:
        st.session_state.chunks = load_chunks(CHUNKS_FILE)
    if "answer_messages" not in st.session_state:
        st.session_state.answer_messages = []
    if "format_messages" not in st.session_state:
        st.session_state.format_messages = []

    if mode == "Answer Mode (Chat)":
        # Show previous chat messages
        for msg in st.session_state.answer_messages:
            with st.chat_message(msg["role"]):
                st.markdown(msg["content"])
                if msg["role"] == "assistant":
                    with st.expander("📋 Copy Response"):
                        st.code(msg["content"], language="markdown")

        # Chat input
        user_input = st.chat_input("Type your support question here...")
        if user_input:
            st.chat_message("user").markdown(user_input)
            st.session_state.answer_messages.append({"role": "user", "content": user_input})
            with st.chat_message("assistant"):
                with st.spinner("🧠 Thinking..."):
                    relevant_chunks = search_messages(user_input, st.session_state.chunks)
                    bot_reply = generate_response(user_input, relevant_chunks)
                    time.sleep(0.5)  # brief pause so the spinner stays visible
                st.markdown(bot_reply)
                with st.expander("📋 Copy Response"):
                    st.code(bot_reply, language="markdown")
            st.session_state.answer_messages.append({"role": "assistant", "content": bot_reply})
    else:
        # Show formatter message history
        for msg in st.session_state.format_messages:
            with st.chat_message(msg["role"]):
                st.markdown(msg["content"])
                if msg["role"] == "assistant":
                    with st.expander("📋 Copy Instruction"):
                        st.code(msg["content"], language="markdown")

        formatter_input = st.chat_input("Enter raw error or instruction to format...")
        if formatter_input:
            st.chat_message("user").markdown(formatter_input)
            st.session_state.format_messages.append({"role": "user", "content": formatter_input})
            with st.chat_message("assistant"):
                with st.spinner("📝 Formatting..."):
                    formatted = format_instruction(formatter_input)
                    time.sleep(0.5)  # brief pause so the spinner stays visible
                st.markdown(formatted)
                with st.expander("📋 Copy Instruction"):
                    st.code(formatted, language="markdown")
            st.session_state.format_messages.append({"role": "assistant", "content": formatted})

if __name__ == "__main__":
    main()
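# To run locally (assuming this file is saved as app.py):
#   export GEMINI_API_KEY="your-key"
#   streamlit run app.py
# On Hugging Face Spaces, set GEMINI_API_KEY as a Space secret instead; it is
# exposed to the app as an environment variable.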