AshBlanc commited on
Commit
f1dcef6
Β·
verified Β·
1 Parent(s): 942018a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +658 -198
app.py CHANGED
@@ -4,150 +4,372 @@ import gradio as gr
4
  import google.generativeai as genai
5
  from PIL import Image
6
  import json
 
 
 
 
 
7
 
8
  # Load environment variables
9
  load_dotenv()
10
 
11
- # ### 1. Configure the AI Model
12
- def setup_api_key():
13
- """Set up the API key for the generative AI model."""
14
- try:
15
- api_key = os.environ.get('GEMINI_API_KEY')
16
- if not api_key:
17
- raise ValueError("GEMINI_API_KEY not found in environment variables.")
18
- genai.configure(api_key=api_key)
19
- print("πŸ”‘ API Key configured successfully.")
20
- return True
21
- except Exception as e:
22
- print(f"πŸ”΄ Error during API configuration: {e}")
23
- return False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
- API_KEY_IS_SET = setup_api_key()
26
 
27
- # ### 2. Model Constants
28
- MODEL_VISION = 'gemini-2.5-pro'
29
- MODEL_INITIAL = 'gemini-2.5-flash'
30
- MODEL_REFINER = 'gemini-2.5-pro'
31
- MODEL_REWRITER = 'gemini-2.5-pro'
 
32
 
33
- # ### 3. System Instructions
34
- VISION_SYSTEM_INSTRUCTION = """
35
- You are an expert UI and content analyst. Analyze the provided screenshot and describe its key elements, including layout, text, and notable features. If no screenshot is provided, use the textual description as context. Provide a concise summary suitable for further processing, even if the input is minimal.
 
 
 
 
 
36
  """
37
 
38
  PROMPT_ENGINEER_SYSTEM_INSTRUCTION = """
39
- You are a meticulous AI Prompt Engineer. Based on the provided analysis and goal, craft a clear, specific, and high-quality prompt optimized for instructing an AI model. If the analysis or goal is minimal, infer a reasonable task and create a concise, actionable prompt that maximizes clarity and effectiveness.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  """
41
 
42
  PROMPT_REFINER_SYSTEM_INSTRUCTION = """
43
- You are an AI Prompt Refiner. Given an original prompt and user feedback, refine the prompt to address the feedback while maintaining its original intent. Output a single, improved prompt that is clear, concise, and actionable.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  """
45
 
46
  META_PROMPT_SYSTEM_INSTRUCTION = """
47
- You are an AI Prompt Optimization expert. Take a single input prompt and generate 3 distinct, improved variations that are clear, concise, and optimized for instructing an AI model. Each variation should enhance clarity, specificity, and effectiveness while maintaining the original intent.
48
- ## CRITICAL OUTPUT FORMAT
49
- Your output MUST be a valid JSON array of 3 strings, each a unique prompt variation. For example:
50
- ["Variation 1", "Variation 2", "Variation 3"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  """
52
 
53
- # ### 4. AI Processing Functions
 
 
 
 
 
54
  def analyze_screenshot(pil_image: Image.Image) -> str:
55
- """Analyze a screenshot and return a textual description."""
56
  if not isinstance(pil_image, Image.Image):
57
  return "Error: Invalid image provided."
58
- model = genai.GenerativeModel(MODEL_VISION, system_instruction=VISION_SYSTEM_INSTRUCTION)
 
 
 
59
  try:
60
- response = model.generate_content(pil_image)
61
- return response.text.strip() or "No meaningful content detected in the screenshot."
 
 
 
 
 
 
 
 
 
 
 
62
  except Exception as e:
63
- return f"Error in vision analysis: {e}"
 
 
64
 
65
- def initial_prompt_stream(analysis_text, goal):
66
- """Generate an initial prompt based on analysis and goal, streaming the output."""
67
- model = genai.GenerativeModel(MODEL_INITIAL, system_instruction=PROMPT_ENGINEER_SYSTEM_INSTRUCTION)
68
- prompt = f"**Analysis:**\n{analysis_text}\n\n**Goal:**\n{goal or 'Infer from analysis.'}"
69
- final_prompt_full = ""
 
70
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  for chunk in model.generate_content(prompt, stream=True):
72
- final_prompt_full += chunk.text
73
- yield final_prompt_full.strip()
 
 
74
  if not final_prompt_full.strip():
75
- final_prompt_full = "Generated prompt: Create a general-purpose AI instruction based on minimal context."
76
- yield final_prompt_full
 
77
  except Exception as e:
78
- yield f"Error in prompt generation: {e}"
 
 
79
 
80
- def refinement_prompt_stream(original_prompt, feedback):
81
- """Refine a prompt based on user feedback, streaming the output."""
82
- model = genai.GenerativeModel(MODEL_REFINER, system_instruction=PROMPT_REFINER_SYSTEM_INSTRUCTION)
83
- prompt = f"**Original Prompt:**\n{original_prompt}\n\n**Feedback:**\n{feedback}"
84
- final_prompt_full = ""
 
85
  try:
86
- for chunk in model.generate_content(prompt, stream=True):
87
- final_prompt_full += chunk.text
88
- yield final_prompt_full.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  if not final_prompt_full.strip():
90
- final_prompt_full = original_prompt # Fallback to original if refinement fails
91
- yield final_prompt_full
92
  except Exception as e:
93
- yield f"Error in prompt refinement: {e}"
 
 
94
 
95
- def rewrite_prompt_with_prewrite(original_prompt):
96
- """Generate three improved variations of a prompt."""
97
- model = genai.GenerativeModel(MODEL_REWRITER, system_instruction=META_PROMPT_SYSTEM_INSTRUCTION)
 
 
98
  try:
99
- response = model.generate_content(original_prompt)
100
- cleaned_response = response.text.strip().replace("```json", "").replace("```", "")
101
- variations = json.loads(cleaned_response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  if isinstance(variations, list) and len(variations) >= 1:
103
- return variations + [""] * (3 - len(variations)) # Pad with empty strings if needed
 
 
 
 
104
  return ["Error: AI returned an invalid format.", "", ""]
 
 
 
105
  except Exception as e:
106
- return [f"Error: {e}", "The AI's response was not valid JSON.", ""]
 
 
107
 
108
- # ### 5. Gradio App Logic
109
- def run_analysis_step(pil_image, situation_text):
110
- """Analyze the input context (screenshot or text) and update the UI."""
 
111
  yield {
112
  satisfaction_row: gr.update(visible=False),
113
  feedback_col: gr.update(visible=False),
114
- prewrite_col: gr.update(visible=False)
 
 
 
115
  }
116
-
117
- if not API_KEY_IS_SET:
118
- return {
 
119
  analysis_accordion: gr.update(open=True),
120
- analysis_output: "Error: API Key not set.",
121
  final_prompt_output: "",
122
  satisfaction_row: gr.update(visible=False),
123
  feedback_col: gr.update(visible=False),
124
  prewrite_col: gr.update(visible=False),
125
  analysis_state: None
126
  }
127
-
 
128
  if pil_image is None and not situation_text.strip():
129
- return {
130
  analysis_accordion: gr.update(open=True),
131
- analysis_output: "Error: Please provide a screenshot or a situation description.",
132
  final_prompt_output: "",
133
  satisfaction_row: gr.update(visible=False),
134
  feedback_col: gr.update(visible=False),
135
  prewrite_col: gr.update(visible=False),
136
  analysis_state: None
137
  }
138
-
139
- yield {
140
- analysis_accordion: gr.update(open=True),
141
- analysis_output: "Processing context...",
142
- final_prompt_output: "",
143
- }
144
-
145
  try:
146
- analysis_text = analyze_screenshot(pil_image) if pil_image else situation_text.strip()
147
- print("Generated Analysis Text:", analysis_text)
148
- if not analysis_text:
149
- analysis_text = "No detailed analysis available; using raw input as context."
150
- return {
 
 
 
 
151
  analysis_accordion: gr.update(open=True),
152
  analysis_output: analysis_text,
153
  final_prompt_output: "",
@@ -156,10 +378,11 @@ def run_analysis_step(pil_image, situation_text):
156
  prewrite_col: gr.update(visible=False),
157
  analysis_state: analysis_text
158
  }
 
159
  except Exception as e:
160
- error_msg = f"Error during analysis: {str(e)}"
161
  print(error_msg)
162
- return {
163
  analysis_accordion: gr.update(open=True),
164
  analysis_output: error_msg,
165
  final_prompt_output: "",
@@ -169,39 +392,70 @@ def run_analysis_step(pil_image, situation_text):
169
  analysis_state: None
170
  }
171
 
172
- def run_streaming_generation(analysis, goal):
173
- """Generate and stream a prompt based on the analysis and goal."""
174
- print("Analysis State in Streaming:", analysis)
175
  if not analysis:
176
  yield {
177
- final_prompt_output: "Error: No analysis available for prompt generation.",
178
  first_prompt_state: None,
179
  satisfaction_row: gr.update(visible=False)
180
  }
181
  return
182
- yield {final_prompt_output: "Generating prompt..."}
 
 
 
 
 
183
  final_prompt_full = ""
184
  for chunk in initial_prompt_stream(analysis, goal):
185
  final_prompt_full = chunk
186
  yield {final_prompt_output: final_prompt_full}
 
187
  yield {
188
  final_prompt_output: final_prompt_full,
189
  first_prompt_state: final_prompt_full,
190
  satisfaction_row: gr.update(visible=True)
191
  }
192
 
193
- def handle_auto_refine(original_prompt):
194
- """Generate and display three prompt variations for user selection."""
 
 
 
 
 
 
 
195
  variations = rewrite_prompt_with_prewrite(original_prompt)
 
 
 
 
 
 
 
 
 
 
 
196
  return {
197
  prewrite_col: gr.update(visible=True),
198
- prewrite_choices: gr.update(choices=variations, value=variations[0] if variations else ""),
199
  satisfaction_row: gr.update(visible=False),
200
  feedback_col: gr.update(visible=False)
201
  }
202
 
203
- def select_rewritten_prompt(selected_prompt):
204
- """Update the final prompt with the selected variation."""
 
 
 
 
 
 
 
 
205
  return {
206
  final_prompt_output: selected_prompt,
207
  first_prompt_state: selected_prompt,
@@ -210,7 +464,7 @@ def select_rewritten_prompt(selected_prompt):
210
  }
211
 
212
  def handle_manual_feedback():
213
- """Show the feedback input area."""
214
  return {
215
  feedback_col: gr.update(visible=True),
216
  satisfaction_row: gr.update(visible=False),
@@ -218,23 +472,29 @@ def handle_manual_feedback():
218
  }
219
 
220
  def handle_like():
221
- """Hide all refinement options when the user is satisfied."""
222
  return {
223
  satisfaction_row: gr.update(visible=False),
224
  feedback_col: gr.update(visible=False),
225
  prewrite_col: gr.update(visible=False)
226
  }
227
 
228
- def refine_with_manual_feedback(original_prompt, feedback):
229
- """Refine the prompt based on manual feedback."""
230
  if not feedback.strip():
231
  yield {
232
  final_prompt_output: original_prompt,
233
  first_prompt_state: original_prompt,
234
- satisfaction_row: gr.update(visible=True)
 
235
  }
236
  return
237
- yield {final_prompt_output: "Refining manually..."}
 
 
 
 
 
238
  final_prompt_full = ""
239
  for chunk in refinement_prompt_stream(original_prompt, feedback):
240
  final_prompt_full = chunk
@@ -242,10 +502,14 @@ def refine_with_manual_feedback(original_prompt, feedback):
242
  final_prompt_output: final_prompt_full,
243
  first_prompt_state: final_prompt_full
244
  }
245
- yield {satisfaction_row: gr.update(visible=True)}
 
 
 
 
246
 
247
  def clear_all():
248
- """Reset the entire interface to its initial state."""
249
  return {
250
  image_input: None,
251
  situation_input: "",
@@ -257,100 +521,296 @@ def clear_all():
257
  feedback_col: gr.update(visible=False),
258
  prewrite_col: gr.update(visible=False),
259
  prewrite_choices: gr.update(choices=[], value=None),
 
260
  analysis_state: None,
261
  first_prompt_state: None
262
  }
263
 
264
- # ### 6. Build and Launch Gradio Interface
265
- theme = gr.themes.Base(
266
- primary_hue=gr.themes.colors.blue,
267
- secondary_hue=gr.themes.colors.neutral,
268
- font=(gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif")
269
- ).set(
270
- button_primary_background_fill_hover='*primary_600',
271
- button_primary_border_color_hover='*primary_600'
272
- )
273
-
274
- with gr.Blocks(theme=theme, analytics_enabled=False) as demo:
275
- # State variables
276
- analysis_state = gr.State()
277
- first_prompt_state = gr.State()
278
-
279
- # Header
280
- gr.Markdown("# πŸ€– Context-Aware Prompt Engineer AI (v22 - Revised)")
281
-
282
- # Layout
283
- with gr.Row(variant="panel"):
284
- with gr.Column(scale=1):
285
- with gr.Group():
286
- gr.Markdown("### 1. Provide Context")
287
- image_input = gr.Image(type="pil", label="Upload Screenshot")
288
- gr.HTML("<div style='text-align: center; font-weight: 500;'>OR</div>")
289
- situation_input = gr.Textbox(label="Describe Your Situation", lines=6)
290
- with gr.Accordion("2. (Optional) Clarify Goal", open=False):
291
- goal_input = gr.Textbox(label=None, show_label=False)
292
- with gr.Row():
293
- submit_btn = gr.Button("πŸš€ Generate Prompt", variant="primary", scale=3)
294
- clear_btn = gr.Button("πŸ”„ Start Over", scale=1)
295
- with gr.Column(scale=2):
296
- with gr.Accordion("Show AI's Context Analysis", open=False) as analysis_accordion:
297
- analysis_output = gr.Textbox(label=None, lines=7, interactive=False, show_label=False, value="")
298
- final_prompt_output = gr.Textbox(label="βœ… Final Engineered Prompt", lines=12, interactive=False, show_copy_button=True)
299
-
300
- # Refinement UI
301
- with gr.Row(visible=False) as satisfaction_row:
302
- gr.Markdown("#### Are you satisfied, or would you like to refine the prompt?")
303
- like_btn = gr.Button("πŸ‘ Yes, Looks Good")
304
- auto_refine_btn = gr.Button("πŸ€– Auto-Refine (PRewrite)")
305
- dislike_btn = gr.Button("πŸ‘Ž Manual Feedback")
306
-
307
- with gr.Column(visible=False) as prewrite_col:
308
- gr.Markdown("### PRewrite: AI-Generated Variations")
309
- prewrite_choices = gr.Radio(label="Select the best variation:", type="value")
310
- select_version_btn = gr.Button("βœ… Use This Version", variant="primary")
311
-
312
- with gr.Column(visible=False) as feedback_col:
313
- feedback_input = gr.Textbox(label="What would you like to change or improve?", lines=3)
314
- refine_btn = gr.Button("πŸ› οΈ Refine Manually", variant="primary")
315
-
316
- # Event Handlers
317
- analysis_outputs = [analysis_accordion, analysis_output, final_prompt_output, satisfaction_row, feedback_col, prewrite_col, analysis_state]
318
- streaming_outputs = [final_prompt_output, first_prompt_state, satisfaction_row]
319
-
320
- submit_btn.click(
321
- fn=run_analysis_step,
322
- inputs=[image_input, situation_input],
323
- outputs=analysis_outputs,
324
- show_progress="hidden"
325
- ).then(
326
- fn=run_streaming_generation,
327
- inputs=[analysis_state, goal_input],
328
- outputs=streaming_outputs,
329
- show_progress="hidden"
330
- )
331
-
332
- goal_input.submit(
333
- fn=run_analysis_step,
334
- inputs=[image_input, situation_input],
335
- outputs=analysis_outputs,
336
- show_progress="hidden"
337
- ).then(
338
- fn=run_streaming_generation,
339
- inputs=[analysis_state, goal_input],
340
- outputs=streaming_outputs,
341
- show_progress="hidden"
342
- )
343
-
344
- like_btn.click(fn=handle_like, outputs=[satisfaction_row, feedback_col, prewrite_col])
345
- auto_refine_btn.click(fn=handle_auto_refine, inputs=[first_prompt_state], outputs=[prewrite_col, prewrite_choices, satisfaction_row, feedback_col])
346
- dislike_btn.click(fn=handle_manual_feedback, outputs=[feedback_col, satisfaction_row, prewrite_col])
347
- select_version_btn.click(fn=select_rewritten_prompt, inputs=[prewrite_choices], outputs=[final_prompt_output, first_prompt_state, satisfaction_row, prewrite_col])
348
- refine_btn.click(fn=refine_with_manual_feedback, inputs=[first_prompt_state, feedback_input], outputs=[final_prompt_output, first_prompt_state, satisfaction_row])
349
- feedback_input.submit(fn=refine_with_manual_feedback, inputs=[first_prompt_state, feedback_input], outputs=[final_prompt_output, first_prompt_state, satisfaction_row])
350
-
351
- clear_btn.click(
352
- fn=clear_all,
353
- outputs=[image_input, situation_input, goal_input, analysis_accordion, analysis_output, final_prompt_output, satisfaction_row, feedback_col, prewrite_col, prewrite_choices, analysis_state, first_prompt_state]
354
  )
355
-
356
- demo.launch(debug=True, pwa=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  import google.generativeai as genai
5
  from PIL import Image
6
  import json
7
+ import asyncio
8
+ import threading
9
+ from typing import Optional, List, Dict, Any
10
+ import time
11
+ from functools import lru_cache
12
 
13
  # Load environment variables
14
  load_dotenv()
15
 
16
+ # ### 1. Enhanced Configuration with Error Handling
17
+ class APIManager:
18
+ """Centralized API management with connection pooling and error handling."""
19
+
20
+ def __init__(self):
21
+ self.api_key = None
22
+ self.is_configured = False
23
+ self.models = {}
24
+ self.setup_api()
25
+
26
+ def setup_api(self):
27
+ """Enhanced API setup with better error handling."""
28
+ try:
29
+ self.api_key = os.environ.get('GEMINI_API_KEY')
30
+ if not self.api_key:
31
+ raise ValueError("GEMINI_API_KEY not found in environment variables.")
32
+
33
+ genai.configure(api_key=self.api_key)
34
+ self.is_configured = True
35
+ print("πŸ”‘ API Key configured successfully.")
36
+
37
+ # Pre-initialize models for better performance
38
+ self.initialize_models()
39
+
40
+ except Exception as e:
41
+ print(f"πŸ”΄ Error during API configuration: {e}")
42
+ self.is_configured = False
43
+
44
+ def initialize_models(self):
45
+ """Pre-initialize models for better performance."""
46
+ try:
47
+ self.models = {
48
+ 'vision': genai.GenerativeModel('gemini-2.5-pro',
49
+ system_instruction=VISION_SYSTEM_INSTRUCTION),
50
+ 'initial': genai.GenerativeModel('gemini-2.5-flash-lite-preview-06-17',
51
+ system_instruction=PROMPT_ENGINEER_SYSTEM_INSTRUCTION),
52
+ 'refiner': genai.GenerativeModel('gemini-2.5-pro',
53
+ system_instruction=PROMPT_REFINER_SYSTEM_INSTRUCTION),
54
+ 'rewriter': genai.GenerativeModel('gemini-2.5-flash',
55
+ system_instruction=META_PROMPT_SYSTEM_INSTRUCTION)
56
+ }
57
+ except Exception as e:
58
+ print(f"⚠️ Warning: Could not pre-initialize models: {e}")
59
+
60
+ # Global API manager instance
61
+ api_manager = APIManager()
62
+
63
+ # ### 2. Enhanced System Instructions with Better Prompting
64
+ VISION_SYSTEM_INSTRUCTION = """
65
+ You are an expert UI/UX and content analyst with deep knowledge of design patterns and user experience principles.
66
 
67
+ TASK: Analyze the provided screenshot or content and extract actionable insights.
68
 
69
+ ANALYSIS FRAMEWORK:
70
+ 1. **Visual Hierarchy**: Identify key elements, layout structure, and information architecture
71
+ 2. **Content Strategy**: Analyze text, messaging, and content organization
72
+ 3. **User Flow**: Understand the intended user journey and interaction patterns
73
+ 4. **Design Patterns**: Recognize UI components, design systems, and visual elements
74
+ 5. **Context Clues**: Infer purpose, target audience, and business objectives
75
 
76
+ OUTPUT FORMAT:
77
+ - Be specific and actionable
78
+ - Focus on elements that would inform prompt creation
79
+ - Highlight pain points or opportunities for improvement
80
+ - Use clear, professional language
81
+ - Keep analysis concise but comprehensive (200-400 words)
82
+
83
+ If no screenshot is provided, analyze the textual description with the same framework.
84
  """
85
 
86
  PROMPT_ENGINEER_SYSTEM_INSTRUCTION = """
87
+ You are a world-class AI Prompt Engineer specializing in creating high-performance prompts that maximize AI model capabilities.
88
+
89
+ EXPERTISE AREAS:
90
+ - Prompt architecture and optimization
91
+ - AI model behavior and limitations
92
+ - Task decomposition and instruction design
93
+ - Output formatting and constraints
94
+ - Performance optimization techniques
95
+
96
+ PROMPT CREATION PRINCIPLES:
97
+ 1. **Clarity**: Use precise, unambiguous language
98
+ 2. **Structure**: Organize instructions logically with clear sections
99
+ 3. **Specificity**: Define exact requirements and expectations
100
+ 4. **Context**: Provide necessary background information
101
+ 5. **Constraints**: Set appropriate boundaries and limitations
102
+ 6. **Examples**: Include relevant examples when helpful
103
+ 7. **Output Format**: Specify desired response structure
104
+
105
+ TASK: Create a professional, optimized prompt based on the provided analysis and goal.
106
+
107
+ OUTPUT REQUIREMENTS:
108
+ - Start with a clear role definition for the AI
109
+ - Include specific task instructions
110
+ - Add relevant context and constraints
111
+ - Specify output format if needed
112
+ - Optimize for the intended AI model's capabilities
113
+ - Ensure the prompt is actionable and measurable
114
+
115
+ Length: 100-300 words (unless complexity requires more)
116
  """
117
 
118
  PROMPT_REFINER_SYSTEM_INSTRUCTION = """
119
+ You are an expert AI Prompt Optimization specialist focused on iterative improvement.
120
+
121
+ TASK: Refine the given prompt based on user feedback while preserving core intent.
122
+
123
+ REFINEMENT APPROACH:
124
+ 1. **Analyze Feedback**: Understand specific improvement requests
125
+ 2. **Identify Issues**: Pinpoint areas needing enhancement
126
+ 3. **Preserve Intent**: Maintain original purpose and goals
127
+ 4. **Enhance Clarity**: Improve language and structure
128
+ 5. **Optimize Performance**: Adjust for better AI model response
129
+
130
+ REFINEMENT TECHNIQUES:
131
+ - Add missing context or constraints
132
+ - Improve instruction clarity
133
+ - Enhance output specifications
134
+ - Adjust tone and style
135
+ - Optimize prompt structure
136
+ - Add or modify examples
137
+
138
+ OUTPUT: A single, improved prompt that addresses the feedback while maintaining effectiveness.
139
  """
140
 
141
  META_PROMPT_SYSTEM_INSTRUCTION = """
142
+ You are an AI Prompt Optimization expert specializing in creating multiple high-quality variations.
143
+
144
+ TASK: Generate 3 distinct, improved variations of the input prompt.
145
+
146
+ VARIATION STRATEGY:
147
+ 1. **Variation 1**: Enhanced clarity and structure
148
+ 2. **Variation 2**: Different approach or perspective
149
+ 3. **Variation 3**: Optimized for specific use case or context
150
+
151
+ OPTIMIZATION TECHNIQUES:
152
+ - Reframe instructions for better comprehension
153
+ - Adjust specificity levels
154
+ - Modify tone and style
155
+ - Enhance context and examples
156
+ - Improve output formatting
157
+ - Optimize for different AI model strengths
158
+
159
+ CRITICAL OUTPUT FORMAT:
160
+ Return ONLY a valid JSON array of exactly 3 strings:
161
+ ["Variation 1 text here", "Variation 2 text here", "Variation 3 text here"]
162
+
163
+ Each variation should be complete, standalone, and ready to use.
164
  """
165
 
166
+ # ### 3. Enhanced Processing Functions with Better Error Handling
167
+ @lru_cache(maxsize=128)
168
+ def analyze_screenshot_cached(image_hash: str, pil_image: Image.Image) -> str:
169
+ """Cached screenshot analysis for better performance."""
170
+ return analyze_screenshot(pil_image)
171
+
172
  def analyze_screenshot(pil_image: Image.Image) -> str:
173
+ """Enhanced screenshot analysis with better error handling."""
174
  if not isinstance(pil_image, Image.Image):
175
  return "Error: Invalid image provided."
176
+
177
+ if not api_manager.is_configured:
178
+ return "Error: API not configured. Please check your API key."
179
+
180
  try:
181
+ model = api_manager.models.get('vision') or genai.GenerativeModel(
182
+ 'gemini-2.0-flash-exp',
183
+ system_instruction=VISION_SYSTEM_INSTRUCTION
184
+ )
185
+
186
+ response = model.generate_content([
187
+ "Please analyze this screenshot following the framework provided in your system instructions.",
188
+ pil_image
189
+ ])
190
+
191
+ result = response.text.strip()
192
+ return result if result else "No meaningful content detected in the screenshot."
193
+
194
  except Exception as e:
195
+ error_msg = f"Error in vision analysis: {str(e)}"
196
+ print(error_msg)
197
+ return error_msg
198
 
199
+ def initial_prompt_stream(analysis_text: str, goal: str):
200
+ """Enhanced streaming prompt generation with better error handling."""
201
+ if not api_manager.is_configured:
202
+ yield "Error: API not configured. Please check your API key."
203
+ return
204
+
205
  try:
206
+ model = api_manager.models.get('initial') or genai.GenerativeModel(
207
+ 'gemini-2.0-flash-exp',
208
+ system_instruction=PROMPT_ENGINEER_SYSTEM_INSTRUCTION
209
+ )
210
+
211
+ # Enhanced prompt construction
212
+ prompt_parts = [
213
+ "**CONTEXT ANALYSIS:**",
214
+ analysis_text,
215
+ "\n**USER GOAL:**",
216
+ goal or "Create an optimized prompt based on the provided analysis.",
217
+ "\n**TASK:** Create a professional, optimized prompt that addresses the context and achieves the specified goal."
218
+ ]
219
+
220
+ prompt = "\n".join(prompt_parts)
221
+ final_prompt_full = ""
222
+
223
  for chunk in model.generate_content(prompt, stream=True):
224
+ if chunk.text:
225
+ final_prompt_full += chunk.text
226
+ yield final_prompt_full.strip()
227
+
228
  if not final_prompt_full.strip():
229
+ fallback = "Create a comprehensive prompt based on the provided context to achieve optimal AI performance."
230
+ yield fallback
231
+
232
  except Exception as e:
233
+ error_msg = f"Error in prompt generation: {str(e)}"
234
+ print(error_msg)
235
+ yield error_msg
236
 
237
+ def refinement_prompt_stream(original_prompt: str, feedback: str):
238
+ """Enhanced prompt refinement with better streaming."""
239
+ if not api_manager.is_configured:
240
+ yield "Error: API not configured. Please check your API key."
241
+ return
242
+
243
  try:
244
+ model = api_manager.models.get('refiner') or genai.GenerativeModel(
245
+ 'gemini-2.0-flash-exp',
246
+ system_instruction=PROMPT_REFINER_SYSTEM_INSTRUCTION
247
+ )
248
+
249
+ refinement_prompt = f"""
250
+ **ORIGINAL PROMPT:**
251
+ {original_prompt}
252
+
253
+ **USER FEEDBACK:**
254
+ {feedback}
255
+
256
+ **TASK:** Refine the original prompt based on the feedback while preserving its core intent and improving its effectiveness.
257
+ """
258
+
259
+ final_prompt_full = ""
260
+ for chunk in model.generate_content(refinement_prompt, stream=True):
261
+ if chunk.text:
262
+ final_prompt_full += chunk.text
263
+ yield final_prompt_full.strip()
264
+
265
  if not final_prompt_full.strip():
266
+ yield original_prompt # Fallback to original
267
+
268
  except Exception as e:
269
+ error_msg = f"Error in prompt refinement: {str(e)}"
270
+ print(error_msg)
271
+ yield error_msg
272
 
273
+ def rewrite_prompt_with_prewrite(original_prompt: str) -> List[str]:
274
+ """Enhanced prompt rewriting with better JSON parsing."""
275
+ if not api_manager.is_configured:
276
+ return ["Error: API not configured. Please check your API key.", "", ""]
277
+
278
  try:
279
+ model = api_manager.models.get('rewriter') or genai.GenerativeModel(
280
+ 'gemini-2.0-flash-exp',
281
+ system_instruction=META_PROMPT_SYSTEM_INSTRUCTION
282
+ )
283
+
284
+ rewrite_prompt = f"""
285
+ Generate 3 improved variations of this prompt:
286
+
287
+ {original_prompt}
288
+
289
+ Remember: Output ONLY a valid JSON array of 3 strings.
290
+ """
291
+
292
+ response = model.generate_content(rewrite_prompt)
293
+
294
+ # Enhanced JSON parsing
295
+ response_text = response.text.strip()
296
+
297
+ # Clean up common formatting issues
298
+ response_text = response_text.replace("```json", "").replace("```", "").strip()
299
+
300
+ # Try to extract JSON if it's wrapped in other text
301
+ if not response_text.startswith('['):
302
+ import re
303
+ json_match = re.search(r'\[.*\]', response_text, re.DOTALL)
304
+ if json_match:
305
+ response_text = json_match.group(0)
306
+
307
+ variations = json.loads(response_text)
308
+
309
  if isinstance(variations, list) and len(variations) >= 1:
310
+ # Ensure we have exactly 3 variations
311
+ while len(variations) < 3:
312
+ variations.append("")
313
+ return variations[:3]
314
+
315
  return ["Error: AI returned an invalid format.", "", ""]
316
+
317
+ except json.JSONDecodeError:
318
+ return ["Error: Could not parse AI response as JSON.", "", ""]
319
  except Exception as e:
320
+ error_msg = f"Error in prompt rewriting: {str(e)}"
321
+ print(error_msg)
322
+ return [error_msg, "", ""]
323
 
324
+ # ### 4. Enhanced Gradio Interface Functions
325
+ def run_analysis_step(pil_image: Optional[Image.Image], situation_text: str):
326
+ """Enhanced analysis step with better state management."""
327
+ # Reset UI state
328
  yield {
329
  satisfaction_row: gr.update(visible=False),
330
  feedback_col: gr.update(visible=False),
331
+ prewrite_col: gr.update(visible=False),
332
+ analysis_accordion: gr.update(open=True),
333
+ analysis_output: "πŸ” Analyzing context...",
334
+ final_prompt_output: ""
335
  }
336
+
337
+ # Validation
338
+ if not api_manager.is_configured:
339
+ yield {
340
  analysis_accordion: gr.update(open=True),
341
+ analysis_output: "❌ Error: API Key not configured. Please check your GEMINI_API_KEY environment variable.",
342
  final_prompt_output: "",
343
  satisfaction_row: gr.update(visible=False),
344
  feedback_col: gr.update(visible=False),
345
  prewrite_col: gr.update(visible=False),
346
  analysis_state: None
347
  }
348
+ return
349
+
350
  if pil_image is None and not situation_text.strip():
351
+ yield {
352
  analysis_accordion: gr.update(open=True),
353
+ analysis_output: "⚠️ Please provide either a screenshot or a situation description to proceed.",
354
  final_prompt_output: "",
355
  satisfaction_row: gr.update(visible=False),
356
  feedback_col: gr.update(visible=False),
357
  prewrite_col: gr.update(visible=False),
358
  analysis_state: None
359
  }
360
+ return
361
+
362
+ # Perform analysis
 
 
 
 
363
  try:
364
+ if pil_image:
365
+ analysis_text = analyze_screenshot(pil_image)
366
+ else:
367
+ analysis_text = situation_text.strip()
368
+
369
+ if not analysis_text or analysis_text.startswith("Error"):
370
+ analysis_text = analysis_text or "Unable to generate analysis. Please try again."
371
+
372
+ yield {
373
  analysis_accordion: gr.update(open=True),
374
  analysis_output: analysis_text,
375
  final_prompt_output: "",
 
378
  prewrite_col: gr.update(visible=False),
379
  analysis_state: analysis_text
380
  }
381
+
382
  except Exception as e:
383
+ error_msg = f"❌ Error during analysis: {str(e)}"
384
  print(error_msg)
385
+ yield {
386
  analysis_accordion: gr.update(open=True),
387
  analysis_output: error_msg,
388
  final_prompt_output: "",
 
392
  analysis_state: None
393
  }
394
 
395
+ def run_streaming_generation(analysis: str, goal: str):
396
+ """Enhanced streaming generation with better progress indication."""
 
397
  if not analysis:
398
  yield {
399
+ final_prompt_output: "❌ Error: No analysis available for prompt generation.",
400
  first_prompt_state: None,
401
  satisfaction_row: gr.update(visible=False)
402
  }
403
  return
404
+
405
+ yield {
406
+ final_prompt_output: "πŸš€ Generating optimized prompt...",
407
+ satisfaction_row: gr.update(visible=False)
408
+ }
409
+
410
  final_prompt_full = ""
411
  for chunk in initial_prompt_stream(analysis, goal):
412
  final_prompt_full = chunk
413
  yield {final_prompt_output: final_prompt_full}
414
+
415
  yield {
416
  final_prompt_output: final_prompt_full,
417
  first_prompt_state: final_prompt_full,
418
  satisfaction_row: gr.update(visible=True)
419
  }
420
 
421
def handle_auto_refine(original_prompt: str):
    """Generate prompt variations and show the auto-refinement panel.

    Args:
        original_prompt: The current prompt to rewrite; may be ``None`` if
            no prompt has been generated yet.

    Returns:
        A dict of Gradio component updates. On success the variation panel
        is shown with the first variation pre-selected; on any failure the
        UI falls back to the satisfaction row.
    """
    # Shared fallback: keep the user on the satisfaction controls.
    fallback = {
        prewrite_col: gr.update(visible=False),
        satisfaction_row: gr.update(visible=True),
        feedback_col: gr.update(visible=False)
    }

    if not original_prompt:
        return fallback

    try:
        variations = rewrite_prompt_with_prewrite(original_prompt)
    except Exception as e:
        # A model/API failure must not crash the event handler.
        print(f"❌ Error during auto-refinement: {e}")
        return fallback

    # Filter defensively: the rewriter may return None or non-string/blank
    # entries, and bare v.strip() would raise on None.
    valid_variations = [
        v for v in (variations or [])
        if isinstance(v, str) and v.strip()
    ]

    if not valid_variations:
        return fallback

    return {
        prewrite_col: gr.update(visible=True),
        prewrite_choices: gr.update(choices=valid_variations, value=valid_variations[0]),
        satisfaction_row: gr.update(visible=False),
        feedback_col: gr.update(visible=False)
    }
448
 
449
+ def select_rewritten_prompt(selected_prompt: str):
450
+ """Enhanced prompt selection with validation."""
451
+ if not selected_prompt or not selected_prompt.strip():
452
+ return {
453
+ final_prompt_output: "❌ Error: No prompt selected.",
454
+ first_prompt_state: None,
455
+ satisfaction_row: gr.update(visible=False),
456
+ prewrite_col: gr.update(visible=False)
457
+ }
458
+
459
  return {
460
  final_prompt_output: selected_prompt,
461
  first_prompt_state: selected_prompt,
 
464
  }
465
 
466
  def handle_manual_feedback():
467
+ """Show feedback input area."""
468
  return {
469
  feedback_col: gr.update(visible=True),
470
  satisfaction_row: gr.update(visible=False),
 
472
  }
473
 
474
def handle_like():
    """Collapse every refinement panel once the user accepts the prompt."""
    hidden = gr.update(visible=False)
    return {
        satisfaction_row: hidden,
        feedback_col: hidden,
        prewrite_col: hidden,
    }
481
 
482
+ def refine_with_manual_feedback(original_prompt: str, feedback: str):
483
+ """Enhanced manual refinement with better streaming."""
484
  if not feedback.strip():
485
  yield {
486
  final_prompt_output: original_prompt,
487
  first_prompt_state: original_prompt,
488
+ satisfaction_row: gr.update(visible=True),
489
+ feedback_col: gr.update(visible=False)
490
  }
491
  return
492
+
493
+ yield {
494
+ final_prompt_output: "πŸ› οΈ Refining prompt based on your feedback...",
495
+ satisfaction_row: gr.update(visible=False)
496
+ }
497
+
498
  final_prompt_full = ""
499
  for chunk in refinement_prompt_stream(original_prompt, feedback):
500
  final_prompt_full = chunk
 
502
  final_prompt_output: final_prompt_full,
503
  first_prompt_state: final_prompt_full
504
  }
505
+
506
+ yield {
507
+ satisfaction_row: gr.update(visible=True),
508
+ feedback_col: gr.update(visible=False)
509
+ }
510
 
511
  def clear_all():
512
+ """Enhanced reset function with complete state clearing."""
513
  return {
514
  image_input: None,
515
  situation_input: "",
 
521
  feedback_col: gr.update(visible=False),
522
  prewrite_col: gr.update(visible=False),
523
  prewrite_choices: gr.update(choices=[], value=None),
524
+ feedback_input: "",
525
  analysis_state: None,
526
  first_prompt_state: None
527
  }
528
 
529
+ # ### 5. Enhanced UI with Modern Design
530
def create_enhanced_interface():
    """Build and return the full Gradio Blocks application.

    Assembles three areas and wires them together:
      * input column — screenshot upload / text description / optional goal,
      * output column — AI-analysis accordion plus the generated prompt,
      * refinement panels — like / auto-refine variations / manual feedback.

    Returns:
        gr.Blocks: the assembled interface, ready for ``.launch()``.
    """

    # Custom CSS for better styling of the container, primary button,
    # and the status pills rendered via gr.HTML below.
    custom_css = """
    .gradio-container {
        max-width: 1200px !important;
        margin: auto;
    }

    .generate-btn {
        background: linear-gradient(45deg, #007bff, #0056b3) !important;
        border: none !important;
        color: white !important;
        font-weight: 600 !important;
        transition: all 0.3s ease !important;
    }

    .generate-btn:hover {
        transform: translateY(-2px) !important;
        box-shadow: 0 4px 12px rgba(0, 123, 255, 0.3) !important;
    }

    .status-indicator {
        padding: 8px 16px;
        border-radius: 20px;
        font-size: 14px;
        font-weight: 500;
        margin: 8px 0;
    }

    .status-success {
        background: #d4edda;
        color: #155724;
        border: 1px solid #c3e6cb;
    }

    .status-error {
        background: #f8d7da;
        color: #721c24;
        border: 1px solid #f5c6cb;
    }

    .status-processing {
        background: #d1ecf1;
        color: #0c5460;
        border: 1px solid #bee5eb;
    }
    """

    # Enhanced theme: Soft base with blue primaries; .set() overrides map
    # button colors onto the theme's color tokens.
    theme = gr.themes.Soft(
        primary_hue=gr.themes.colors.blue,
        secondary_hue=gr.themes.colors.neutral,
        neutral_hue=gr.themes.colors.slate,
        font=(gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif")
    ).set(
        button_primary_background_fill='*primary_500',
        button_primary_background_fill_hover='*primary_600',
        button_primary_border_color='*primary_500',
        button_primary_border_color_hover='*primary_600',
        button_secondary_background_fill='*neutral_100',
        button_secondary_text_color='*neutral_800'
    )

    with gr.Blocks(theme=theme, css=custom_css, title="AI Prompt Engineer") as interface:
        # State management: carry the analysis text and the last generated
        # prompt between chained event handlers.
        analysis_state = gr.State()
        first_prompt_state = gr.State()

        # Header
        gr.Markdown("""
        # πŸš€ AI Prompt Engineer Pro
        ### Transform your ideas into optimized AI prompts with intelligent analysis and refinement
        """)

        # Status indicator, evaluated once at build time.
        # NOTE(review): `api_manager` must be defined earlier in the file
        # (it replaces the older API_KEY_IS_SET flag) — confirm it exists.
        with gr.Row():
            with gr.Column():
                if api_manager.is_configured:
                    gr.HTML('<div class="status-indicator status-success">βœ… API Connected & Ready</div>')
                else:
                    gr.HTML('<div class="status-indicator status-error">❌ API Configuration Error</div>')

        # Main interface: inputs on the left, outputs/refinement on the right.
        with gr.Row(equal_height=True):
            # Input column
            with gr.Column(scale=1, min_width=400):
                with gr.Group():
                    gr.Markdown("### πŸ“ Provide Context")

                    with gr.Tabs():
                        with gr.Tab("πŸ“Έ Screenshot"):
                            image_input = gr.Image(
                                type="pil",
                                label="Upload Screenshot",
                                sources=['upload'],
                                interactive=True
                            )

                        with gr.Tab("✍️ Text Description"):
                            situation_input = gr.Textbox(
                                label="Describe Your Situation",
                                placeholder="Describe what you need help with, the context, or the task you want to accomplish...",
                                lines=6,
                                max_lines=10
                            )

                with gr.Accordion("🎯 Optional: Specify Goal", open=False):
                    goal_input = gr.Textbox(
                        label="What do you want to achieve?",
                        placeholder="e.g., 'Create a prompt for generating marketing copy' or 'Help me write better documentation'",
                        lines=2,
                        show_label=False
                    )

                with gr.Row():
                    submit_btn = gr.Button(
                        "πŸš€ Generate Prompt",
                        variant="primary",
                        scale=3,
                        elem_classes=["generate-btn"]
                    )
                    clear_btn = gr.Button("πŸ”„ Reset", scale=1)

            # Output column
            with gr.Column(scale=2, min_width=600):
                with gr.Accordion("πŸ” AI Analysis", open=False) as analysis_accordion:
                    analysis_output = gr.Textbox(
                        label="Context Analysis",
                        lines=6,
                        interactive=False,
                        show_copy_button=True,
                        placeholder="AI analysis will appear here..."
                    )

                final_prompt_output = gr.Textbox(
                    label="βœ… Optimized Prompt",
                    lines=12,
                    interactive=False,
                    show_copy_button=True,
                    placeholder="Your optimized prompt will appear here..."
                )

                # Refinement interface — hidden until a prompt exists.
                with gr.Row(visible=False) as satisfaction_row:
                    with gr.Column():
                        gr.Markdown("### 🎨 Refinement Options")
                        gr.Markdown("*How would you like to improve this prompt?*")

                        with gr.Row():
                            like_btn = gr.Button("πŸ‘ Perfect!", variant="secondary")
                            auto_refine_btn = gr.Button("πŸ€– Auto-Refine", variant="primary")
                            dislike_btn = gr.Button("✏️ Manual Feedback", variant="secondary")

                # Auto-refinement section — shown by handle_auto_refine.
                with gr.Column(visible=False) as prewrite_col:
                    gr.Markdown("### πŸ”„ Auto-Generated Variations")
                    gr.Markdown("*Select the variation that best fits your needs:*")

                    prewrite_choices = gr.Radio(
                        label="Choose your preferred version:",
                        type="value",
                        interactive=True
                    )
                    select_version_btn = gr.Button("βœ… Use This Version", variant="primary")

                # Manual feedback section — shown by handle_manual_feedback.
                with gr.Column(visible=False) as feedback_col:
                    gr.Markdown("### πŸ’¬ Manual Refinement")
                    feedback_input = gr.Textbox(
                        label="What would you like to improve?",
                        placeholder="e.g., 'Make it more specific', 'Add examples', 'Change the tone to be more professional'...",
                        lines=3
                    )
                    refine_btn = gr.Button("πŸ› οΈ Refine Prompt", variant="primary")

        # Event handlers with better error handling.
        # NOTE(review): safe_wrapper is defined but never applied to any
        # handler below — dead code or missing wiring; confirm intent.
        def safe_wrapper(func):
            """Wrapper for safe function execution."""
            def wrapper(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    print(f"Error in {func.__name__}: {e}")
                    return {"error": str(e)}
            return wrapper

        # Main workflow output lists, shared by the two submission paths.
        # Order must match the dicts yielded by the handlers.
        # NOTE(review): the handlers (run_analysis_step, etc.) are defined at
        # module level but key their return dicts on these local component
        # objects — confirm the names resolve at call time.
        analysis_outputs = [
            analysis_accordion, analysis_output, final_prompt_output,
            satisfaction_row, feedback_col, prewrite_col, analysis_state
        ]

        streaming_outputs = [final_prompt_output, first_prompt_state, satisfaction_row]

        # Event bindings: analysis first, then prompt generation chained via .then().
        submit_btn.click(
            fn=run_analysis_step,
            inputs=[image_input, situation_input],
            outputs=analysis_outputs,
            show_progress="minimal"
        ).then(
            fn=run_streaming_generation,
            inputs=[analysis_state, goal_input],
            outputs=streaming_outputs,
            show_progress="minimal"
        )

        # Auto-submission on goal input (Enter in the goal textbox runs the
        # same two-step pipeline as the Generate button).
        goal_input.submit(
            fn=run_analysis_step,
            inputs=[image_input, situation_input],
            outputs=analysis_outputs,
            show_progress="minimal"
        ).then(
            fn=run_streaming_generation,
            inputs=[analysis_state, goal_input],
            outputs=streaming_outputs,
            show_progress="minimal"
        )

        # Refinement handlers
        like_btn.click(
            fn=handle_like,
            outputs=[satisfaction_row, feedback_col, prewrite_col]
        )

        auto_refine_btn.click(
            fn=handle_auto_refine,
            inputs=[first_prompt_state],
            outputs=[prewrite_col, prewrite_choices, satisfaction_row, feedback_col]
        )

        dislike_btn.click(
            fn=handle_manual_feedback,
            outputs=[feedback_col, satisfaction_row, prewrite_col]
        )

        select_version_btn.click(
            fn=select_rewritten_prompt,
            inputs=[prewrite_choices],
            outputs=[final_prompt_output, first_prompt_state, satisfaction_row, prewrite_col]
        )

        refine_btn.click(
            fn=refine_with_manual_feedback,
            inputs=[first_prompt_state, feedback_input],
            outputs=[final_prompt_output, first_prompt_state, satisfaction_row, feedback_col]
        )

        feedback_input.submit(
            fn=refine_with_manual_feedback,
            inputs=[first_prompt_state, feedback_input],
            outputs=[final_prompt_output, first_prompt_state, satisfaction_row, feedback_col]
        )

        # Reset functionality: clear_all returns a value/update for every
        # component listed here, restoring the initial UI.
        clear_btn.click(
            fn=clear_all,
            outputs=[
                image_input, situation_input, goal_input, analysis_accordion,
                analysis_output, final_prompt_output, satisfaction_row,
                feedback_col, prewrite_col, prewrite_choices, feedback_input,
                analysis_state, first_prompt_state
            ]
        )

    return interface
799
+
800
# ### 6. Launch Configuration
if __name__ == "__main__":
    # Build the Gradio app only when executed as a script (not on import).
    demo = create_enhanced_interface()

    # Launch settings: bind to all interfaces so the app is reachable inside
    # containers / hosted Spaces. The port defaults to 7860 but honors the
    # conventional PORT environment variable so deployments can override it.
    demo.launch(
        debug=True,
        share=False,
        inbrowser=True,
        server_name="0.0.0.0",
        server_port=int(os.environ.get("PORT", 7860)),
        show_error=True,
        favicon_path=None,
        ssl_verify=False,  # allow self-signed certs behind local proxies
        quiet=False
    )