AshBlanc commited on
Commit
a79cd5c
Β·
verified Β·
1 Parent(s): 35d5a72

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +217 -0
app.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ import gradio as gr
4
+ import google.generativeai as genai
5
+ from PIL import Image
6
+ import json
7
+
8
+ load_dotenv()
9
+ # 2. Configure the AI Model
10
def setup_api_key() -> bool:
    """Configure the Gemini client from the GEMINI_API_KEY environment variable.

    Returns:
        True when a key was found and `genai.configure` succeeded,
        False when the key is missing or configuration raised.
    """
    try:
        api_key = os.environ.get('GEMINI_API_KEY')
        # FIXED: genai.configure(api_key=None) does not fail eagerly, so a
        # missing key used to be reported as success and API_KEY_IS_SET
        # ended up True. Detect the absent key explicitly instead.
        if not api_key:
            print("πŸ”΄ Error during API configuration: GEMINI_API_KEY is not set.")
            return False
        genai.configure(api_key=api_key)
        print("πŸ”‘ API Key configured successfully.")
        return True
    except Exception as e:
        print(f"πŸ”΄ Error during API configuration: {e}")
        return False
19
+
20
# Configure once at import time; every handler checks this flag before
# calling the API so a bad key degrades gracefully instead of crashing.
API_KEY_IS_SET = setup_api_key()

# FIXED: Use correct, publicly available model names for stability.
MODEL_VISION = 'gemini-1.5-flash-latest'    # screenshot analysis
MODEL_INITIAL = 'gemini-1.5-flash-latest'   # first-draft prompt generation
MODEL_REFINER = 'gemini-1.5-pro-latest'     # manual-feedback refinement
MODEL_REWRITER = 'gemini-1.5-pro-latest'    # PRewrite variation generation

# 3. Define System Instructions
VISION_SYSTEM_INSTRUCTION = "You are an expert UI and content analyst..."
PROMPT_ENGINEER_SYSTEM_INSTRUCTION = "You are a meticulous AI Prompt Engineer..."
PROMPT_REFINER_SYSTEM_INSTRUCTION = "## CRITICAL SYSTEM DIRECTIVE\nYour output is not for a human..."
# The rewriter is required to emit a JSON array of exactly three strings;
# rewrite_prompt_with_prewrite() parses this output with json.loads.
META_PROMPT_SYSTEM_INSTRUCTION = """
You are an AI Prompt Optimization expert, a 'Prompt Rewriter'. Your goal is to take a single input prompt and generate 3 distinct, improved variations.
## CRITICAL OUTPUT FORMAT
Your output MUST be a valid JSON array containing exactly three strings. Each string is a complete, rewritten prompt.
"""
37
+
38
+ # 4. Define AI processing functions
39
def analyze_screenshot(pil_image: Image.Image) -> str:
    """Run the vision model over a screenshot and return its analysis text.

    On any API failure the error is returned as a string prefixed with
    "Error:" (callers test for that substring).
    """
    vision_model = genai.GenerativeModel(
        MODEL_VISION,
        system_instruction=VISION_SYSTEM_INSTRUCTION,
    )
    try:
        response = vision_model.generate_content(pil_image)
        return response.text
    except Exception as e:
        return f"Error: {e}"
43
+
44
def initial_prompt_stream(analysis_text, goal):
    """Yield text chunks of the first engineered prompt, streamed from the model.

    When the user supplied no goal, the model is told to infer one from
    the analysis.
    """
    engineer = genai.GenerativeModel(
        MODEL_INITIAL,
        system_instruction=PROMPT_ENGINEER_SYSTEM_INSTRUCTION,
    )
    request = f"**Analysis:**\n{analysis_text}\n\n**Goal:**\n{goal or 'Infer from analysis.'}"
    for piece in engineer.generate_content(request, stream=True):
        yield piece.text
48
+
49
def refinement_prompt_stream(original_prompt, feedback):
    """Yield text chunks of a prompt refined per the user's feedback."""
    refiner = genai.GenerativeModel(
        MODEL_REFINER,
        system_instruction=PROMPT_REFINER_SYSTEM_INSTRUCTION,
    )
    request = f"**Original Prompt:**\n{original_prompt}\n\n**Feedback:**\n{feedback}"
    for piece in refiner.generate_content(request, stream=True):
        yield piece.text
53
+
54
def rewrite_prompt_with_prewrite(original_prompt):
    """Ask the rewriter model for improved variations of *original_prompt*.

    Returns:
        A list of strings (the variations) on success, or a 3-element list
        whose first entry starts with "Error:" on failure, so the caller's
        gr.Radio choices always receive a list of strings.
    """
    model = genai.GenerativeModel(MODEL_REWRITER, system_instruction=META_PROMPT_SYSTEM_INSTRUCTION)
    try:
        response = model.generate_content(original_prompt)
        # FIXED: only strip the markdown code fence from the *edges* of the
        # response; the old blanket str.replace could corrupt backticks that
        # legitimately appear inside the rewritten prompts.
        cleaned_response = response.text.strip()
        if cleaned_response.startswith("```"):
            cleaned_response = cleaned_response.strip("`").strip()
            if cleaned_response.startswith("json"):
                cleaned_response = cleaned_response[len("json"):].strip()
        variations = json.loads(cleaned_response)
        # FIXED: also require every element to be a string; a JSON array of
        # objects/numbers would otherwise break the downstream Radio widget.
        if (
            isinstance(variations, list)
            and variations
            and all(isinstance(v, str) for v in variations)
        ):
            return variations
        return ["Error: AI returned an invalid format.", "", ""]
    except Exception as e:
        return [f"Error: {e}", "The AI's response was not valid JSON.", ""]
65
+
66
+ # 5. Define Gradio App's interactive logic (CORRECTED)
67
def generate_initial_prompt(pil_image, situation_text, goal_clarification):
    """Generator handler: analyze the context, then stream the engineered prompt.

    Yields dicts mapping Gradio components to gr.update() values so the UI
    receives partial updates while the model streams.
    """
    # Reset UI
    yield {
        analysis_accordion: gr.update(visible=False, open=False, value=""),
        final_prompt_output: gr.update(value=""),
        satisfaction_row: gr.update(visible=False),
        feedback_col: gr.update(visible=False),
        prewrite_col: gr.update(visible=False),
        prewrite_choices: gr.update(choices=[], value=None),
        analysis_state: None,
        first_prompt_state: None
    }
    # Guard clauses: surface configuration / input problems in the analysis
    # panel and stop the generator early.
    if not API_KEY_IS_SET:
        yield {analysis_accordion: gr.update(visible=True, open=True, value="API Key not set.")}; return
    if pil_image is None and not situation_text.strip():
        yield {analysis_accordion: gr.update(visible=True, open=True, value="Error: Please provide context.")}; return

    yield {analysis_accordion: gr.update(visible=True, open=True, value="Processing...")}
    # A screenshot takes precedence; otherwise the free-text description is
    # used verbatim as the "analysis".
    analysis_text = analyze_screenshot(pil_image) if pil_image is not None else f"The user described their situation as: '{situation_text}'"
    # analyze_screenshot signals failure by returning a string containing "Error:".
    if "Error:" in analysis_text:
        yield {analysis_accordion: gr.update(visible=True, open=True, value=analysis_text)}; return

    yield {analysis_accordion: gr.update(value=analysis_text), final_prompt_output: gr.update(value="Generating...")}
    final_prompt_full = ""
    for chunk in initial_prompt_stream(analysis_text, goal_clarification):
        final_prompt_full += chunk
        # FIXED: THIS IS THE CRITICAL FIX.
        # Every streaming update for the prompt MUST also contain the state of the analysis panel.
        yield {
            analysis_accordion: gr.update(visible=True, open=True, value=analysis_text),
            final_prompt_output: gr.update(value=final_prompt_full.strip())
        }

    # Final state update after stream is complete
    yield {
        satisfaction_row: gr.update(visible=True),
        analysis_state: analysis_text,
        first_prompt_state: final_prompt_full.strip()
    }
106
+
107
def handle_auto_refine(original_prompt):
    """Generate PRewrite variations and switch the UI into selection mode."""
    options = rewrite_prompt_with_prewrite(original_prompt)
    default_choice = options[0] if options else ""
    return {
        prewrite_col: gr.update(visible=True),
        prewrite_choices: gr.update(choices=options, value=default_choice),
        satisfaction_row: gr.update(visible=False),
        feedback_col: gr.update(visible=False),
    }
115
+
116
def select_rewritten_prompt(selected_prompt):
    """Adopt the chosen PRewrite variation as the current prompt."""
    updates = {
        final_prompt_output: gr.update(value=selected_prompt),
        first_prompt_state: selected_prompt,
        satisfaction_row: gr.update(visible=True),
        prewrite_col: gr.update(visible=False),
    }
    return updates
123
+
124
def handle_manual_feedback():
    """Show the manual-feedback column and hide the other follow-up panels."""
    shown, hidden = gr.update(visible=True), gr.update(visible=False)
    return {
        feedback_col: shown,
        satisfaction_row: hidden,
        prewrite_col: gr.update(visible=False),
    }
130
+
131
def handle_like():
    """User accepted the prompt: collapse every follow-up panel."""
    return {
        component: gr.update(visible=False)
        for component in (satisfaction_row, feedback_col, prewrite_col)
    }
137
+
138
def refine_with_manual_feedback(original_prompt, feedback, current_analysis):
    """Stream a manually-refined prompt based on the user's written feedback.

    Each streamed update also re-asserts the analysis panel contents so the
    accordion stays populated while the prompt text grows.
    """
    if not feedback.strip():
        # No feedback given: keep the existing prompt untouched.
        yield {final_prompt_output: original_prompt}
        return
    yield {final_prompt_output: "Refining manually..."}
    accumulated = ""
    for piece in refinement_prompt_stream(original_prompt, feedback):
        accumulated += piece
        trimmed = accumulated.strip()
        yield {
            analysis_accordion: gr.update(visible=True, open=True, value=current_analysis),
            final_prompt_output: trimmed,
            first_prompt_state: trimmed,
        }
    yield {satisfaction_row: gr.update(visible=True)}
151
+
152
def clear_all():
    """Reset every input, output, panel, and piece of state to its initial value."""
    resets = {
        image_input: None,
        situation_input: "",
        goal_input: "",
        analysis_accordion: gr.update(visible=False, open=False, value=""),
        final_prompt_output: "",
        prewrite_choices: gr.update(choices=[], value=None),
        analysis_state: None,
        first_prompt_state: None,
    }
    # Hide all three follow-up panels.
    for panel in (satisfaction_row, feedback_col, prewrite_col):
        resets[panel] = gr.update(visible=False)
    return resets
166
+
167
# 6. Build and launch the Gradio Interface
# Base theme with primary buttons that darken slightly on hover.
theme = gr.themes.Base(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.neutral, font=(gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif")).set(button_primary_background_fill_hover='*primary_600', button_primary_border_color_hover='*primary_600')

with gr.Blocks(theme=theme, analytics_enabled=False) as demo:
    # Cross-interaction state: latest context analysis and the current prompt.
    analysis_state, first_prompt_state = gr.State(), gr.State()
    gr.Markdown("# πŸ€– Context-Aware Prompt Engineer AI (v20 - FINAL)")

    with gr.Row(variant="panel"):
        # Left column: context inputs (screenshot OR free-text situation).
        with gr.Column(scale=1):
            with gr.Group():
                gr.Markdown("### 1. Provide Context"); image_input = gr.Image(type="pil", label="Upload Screenshot"); gr.HTML("<div style='text-align: center; font-weight: 500; margin: 10px 0;'>OR</div>"); situation_input = gr.Textbox(label="Describe Your Situation", lines=6)
            with gr.Accordion("2. (Optional) Clarify Goal", open=False):
                goal_input = gr.Textbox(label=None, show_label=False)
            with gr.Row():
                submit_btn = gr.Button("πŸš€ Generate Prompt", variant="primary", scale=3)
                clear_btn = gr.Button("πŸ”„ Start Over", scale=1)
        # Right column: the AI's analysis (collapsible) and the final prompt.
        with gr.Column(scale=2):
            with gr.Accordion("Show AI's Context Analysis", open=False) as analysis_accordion:
                analysis_output = gr.Textbox(label=None, lines=7, interactive=False, show_label=False)
            final_prompt_output = gr.Textbox(label="βœ… Final Engineered Prompt", lines=12, interactive=False, show_copy_button=True)

    # Hidden follow-up panels, toggled by the handler functions above:
    # satisfaction check, PRewrite variation picker, manual feedback form.
    with gr.Row(visible=False) as satisfaction_row:
        gr.Markdown("#### Are you satisfied, or would you like to refine the prompt?");
        like_btn = gr.Button("πŸ‘ Yes, Looks Good")
        auto_refine_btn = gr.Button("πŸ€– Auto-Refine (PRewrite)")
        dislike_btn = gr.Button("πŸ‘Ž Manual Feedback")

    with gr.Column(visible=False) as prewrite_col:
        gr.Markdown("### PRewrite: AI-Generated Variations"); prewrite_choices = gr.Radio(label="Select the best variation:", type="value"); select_version_btn = gr.Button("βœ… Use This Version", variant="primary")

    with gr.Column(visible=False) as feedback_col:
        feedback_input = gr.Textbox(label="What would you like to change or improve?", lines=3); refine_btn = gr.Button("πŸ› οΈ Refine Manually", variant="primary")

    # Event Handlers
    # generate_initial_prompt yields dicts keyed by these components.
    outputs_for_generation = [analysis_accordion, final_prompt_output, satisfaction_row, feedback_col, prewrite_col, prewrite_choices, analysis_state, first_prompt_state]
    submit_btn.click(fn=generate_initial_prompt, inputs=[image_input, situation_input, goal_input], outputs=outputs_for_generation)
    goal_input.submit(fn=generate_initial_prompt, inputs=[image_input, situation_input, goal_input], outputs=outputs_for_generation)

    like_btn.click(fn=handle_like, outputs=[satisfaction_row, feedback_col, prewrite_col])
    auto_refine_btn.click(fn=handle_auto_refine, inputs=[first_prompt_state], outputs=[prewrite_col, prewrite_choices, satisfaction_row, feedback_col])
    dislike_btn.click(fn=handle_manual_feedback, outputs=[feedback_col, satisfaction_row, prewrite_col])

    select_version_btn.click(fn=select_rewritten_prompt, inputs=[prewrite_choices], outputs=[final_prompt_output, first_prompt_state, satisfaction_row, prewrite_col])
    # Pass the analysis_state to the manual refiner
    refine_btn.click(fn=refine_with_manual_feedback, inputs=[first_prompt_state, feedback_input, analysis_state], outputs=[analysis_accordion, final_prompt_output, first_prompt_state, satisfaction_row])
    feedback_input.submit(fn=refine_with_manual_feedback, inputs=[first_prompt_state, feedback_input, analysis_state], outputs=[analysis_accordion, final_prompt_output, first_prompt_state, satisfaction_row])

    clear_btn.click(fn=clear_all, outputs=[image_input, situation_input, goal_input, analysis_accordion, final_prompt_output, satisfaction_row, feedback_col, prewrite_col, prewrite_choices, analysis_state, first_prompt_state])

# NOTE(review): the previous comment claimed the 'pwa' argument was removed,
# but it is still passed; launch(pwa=...) exists only on recent Gradio
# versions — confirm the pinned gradio version supports it.
demo.launch(debug=True, pwa=True)