leomini committed
Commit 58af20d · verified · 1 Parent(s): 262fef2

Upload 4 files

Files changed (4)
  1. Dockerfile +30 -0
  2. app.py +134 -0
  3. chunks.txt +0 -0
  4. requirements.txt +2 -0
Dockerfile ADDED
@@ -0,0 +1,30 @@
+ # Use a lightweight Python base image
+ FROM python:3.9-slim
+
+ # Set working directory
+ WORKDIR /app
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     bash \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Install Python dependencies
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Create a non-root user
+ RUN useradd -m -u 1000 user
+
+ # Copy application files
+ COPY app.py .
+ COPY chunks.txt .
+
+ # Switch to non-root user
+ USER user
+
+ # Set environment variable for Gemini API key (to be provided via Hugging Face Secrets; never bake the key into the image)
+ ENV GEMINI_API_KEY=""
+
+ # Run the Streamlit app
+ CMD ["streamlit", "run", "app.py", "--server.port", "7860", "--server.address", "0.0.0.0"]
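The Dockerfile leaves GEMINI_API_KEY empty and expects Hugging Face Secrets to supply the real value at runtime, so a fail-fast check at startup makes a missing secret obvious. A minimal sketch assuming only the standard library; require_api_key is an illustrative helper, not part of app.py:

import os

def require_api_key() -> str:
    # Hugging Face Spaces injects secrets as environment variables at runtime;
    # an empty value means the secret was never configured.
    key = os.getenv("GEMINI_API_KEY", "")
    if not key:
        raise RuntimeError("GEMINI_API_KEY is not set; configure it as a Space secret.")
    return key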
app.py ADDED
@@ -0,0 +1,134 @@
+ import streamlit as st
+ import os
+ import requests
+ import time
+
+ # Config
+ CHUNKS_FILE = "chunks.txt"  # Updated to match Dockerfile structure
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")  # Injected via Hugging Face Secrets
+ GEMINI_API_URL = "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent"
+ MAX_CONTEXT_LENGTH = 1000
+ MAX_RESPONSE_LENGTH = 300
+
+ # Load chunks
+ def load_chunks(chunks_file):
+     chunks = []
+     try:
+         with open(chunks_file, 'r', encoding='utf-8') as file:
+             current_chunk = ""
+             for line in file:
+                 if line.startswith("Chunk"):
+                     if current_chunk:
+                         chunks.append(current_chunk.strip())
+                     current_chunk = ""
+                 else:
+                     current_chunk += line
+             if current_chunk:
+                 chunks.append(current_chunk.strip())
+         return chunks
+     except Exception as e:
+         st.error(f"⚠️ Error loading chunks: {e}")
+         return []
+
+ # Basic keyword search
+ def search_messages(query, chunks, top_k=3):
+     query_words = set(query.lower().split())
+     scores = []
+     for chunk in chunks:
+         chunk_words = set(chunk.lower().split())
+         match_count = len(query_words.intersection(chunk_words))
+         score = match_count / max(len(chunk_words), 1)
+         scores.append((score, chunk))
+     scores.sort(key=lambda item: item[0], reverse=True)
+     return [chunk for _, chunk in scores[:top_k]]
+
+ # Call Gemini
+ def generate_response(query, chunks):
+     try:
+         context = "\n".join(chunks)[:MAX_CONTEXT_LENGTH]
+         prompt = f"""
+ You are a professional customer support assistant. You resolve user issues by analyzing previous customer interactions and providing clear, helpful, and empathetic responses.
+
+ Instructions:
+ - Use the provided chat history as your internal knowledge base.
+ - Do not mention or reference the history directly.
+ - Understand recurring issues and recognize patterns from similar past cases.
+ - For the given user query:
+   - Greet and acknowledge the concern professionally.
+   - Suggest a solution or steps, based on insights from similar historical interactions.
+   - If the solution is uncertain, offer best practices or next steps.
+   - End with a polite closing and an offer for further help.
+ - Do not mention past history or previous tickets.
+
+ Chat History:
+ {context}
+
+ User Query:
+ "{query}"
+
+ Your Response:
+ """.strip()
+
+         headers = {
+             "Content-Type": "application/json",
+             "X-goog-api-key": GEMINI_API_KEY
+         }
+
+         data = {
+             "contents": [{"parts": [{"text": prompt}]}],
+             "generationConfig": {"maxOutputTokens": MAX_RESPONSE_LENGTH}
+         }
+
+         response = requests.post(GEMINI_API_URL, headers=headers, json=data, timeout=30)
+         response.raise_for_status()
+         response_data = response.json()
+         return response_data["candidates"][0]["content"]["parts"][0]["text"].strip()
+
+     except Exception as e:
+         return f"⚠️ Error generating response: {e}"
+
+ # App UI
+ def main():
+     st.set_page_config(page_title="Support Assistant", layout="centered")
+     st.title("✅ Assistant ✅")
+     st.caption("Submit support questions related to previously resolved tickets for efficient and accurate assistance.")
+
+     # Load chunks and history
+     if "chunks" not in st.session_state:
+         st.session_state.chunks = load_chunks(CHUNKS_FILE)
+     if "messages" not in st.session_state:
+         st.session_state.messages = []
+
+     # Show chat history
+     for message in st.session_state.messages:
+         role, content = message["role"], message["content"]
+         with st.chat_message("user" if role == "user" else "assistant"):
+             st.markdown(content)
+             if role == "assistant":
+                 with st.expander("📋 Copy Response"):
+                     st.code(content, language="markdown")
+
+     # User input
+     user_input = st.chat_input("Type your support question here...")
+
+     if user_input:
+         # Display user message
+         with st.chat_message("user"):
+             st.markdown(user_input)
+         st.session_state.messages.append({"role": "user", "content": user_input})
+
+         # Show bot is thinking...
+         with st.chat_message("assistant"):
+             with st.spinner("🧠 Thinking..."):
+                 relevant_chunks = search_messages(user_input, st.session_state.chunks)
+                 bot_reply = generate_response(user_input, relevant_chunks)
+                 time.sleep(0.5)  # simulate delay
+             st.markdown(bot_reply)
+             with st.expander("📋 Copy Response"):
+                 st.code(bot_reply, language="markdown")
+
+         # Save bot reply
+         st.session_state.messages.append({"role": "assistant", "content": bot_reply})
+
+ if __name__ == "__main__":
+     main()
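For reference, search_messages scores each chunk as the fraction of its unique words that also appear in the query, so short on-topic chunks rank highest. A minimal sketch of that scoring on made-up data; the sample chunks are hypothetical, not drawn from chunks.txt:

query = "how do i reset my password"
chunks = [
    "user could not reset a forgotten password agent sent a reset link",
    "user asked about a duplicate charge on the monthly invoice",
]

query_words = set(query.lower().split())
for chunk in chunks:
    chunk_words = set(chunk.lower().split())
    # Same formula as search_messages: |query words ∩ chunk words| / |chunk words|
    score = len(query_words & chunk_words) / max(len(chunk_words), 1)
    print(f"{score:.2f}  {chunk}")
# Prints 0.20 for the password chunk (shared words: reset, password)
# and 0.00 for the billing chunk, so the password chunk is returned first.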
chunks.txt ADDED
The diff for this file is too large to render. See raw diff
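Although chunks.txt is too large to render, load_chunks above implies its layout: delimiter lines beginning with "Chunk" separate records, and every other line belongs to the current record. A minimal sketch of that assumed format, parsed the same way the app does; the sample conversations are hypothetical:

sample = (
    "Chunk 1\n"
    "User: My order never arrived.\n"
    "Agent: Apologies, a replacement has shipped.\n"
    "Chunk 2\n"
    "User: How do I change my email address?\n"
    "Agent: Update it under Settings > Account.\n"
)

# Mirrors load_chunks: a line starting with "Chunk" closes the previous record.
records, current = [], ""
for line in sample.splitlines(keepends=True):
    if line.startswith("Chunk"):
        if current:
            records.append(current.strip())
        current = ""
    else:
        current += line
if current:
    records.append(current.strip())

print(len(records))  # -> 2, one record per "Chunk" header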
 
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ streamlit==1.32.0
+ requests==2.32.3