fix error
app.py
CHANGED
@@ -1,3 +1,4 @@
+from functools import partial
 import gradio as gr
 import torch
 from transformers import AutoModel, AutoTokenizer
@@ -34,7 +35,7 @@ def find_result_image(path):
 
 # --- 2. Main Processing Function (UPDATED for multi-bbox drawing) ---
 @spaces.GPU
-def process_ocr_task(image, model_size, ref_text, task_type
+def process_ocr_task(image, model_size, ref_text, task_type):
     """
     Processes an image with DeepSeek-OCR for all supported tasks.
     Now draws ALL detected bounding boxes for ANY task.
@@ -151,7 +152,8 @@ with gr.Blocks(title="Text Extraction Demo", theme=gr.themes.Soft()) as demo:
             free_output_image = gr.Image(label="🖼️ Image Result (if any)", type="pil")
 
         # Wire Free OCR button
-
+        free_ocr = partial(process_ocr_task, task_type="๐ Free OCR", ref_text="")
+        free_btn.click(fn=free_ocr, inputs=[free_image, free_model_size], outputs=[free_output_text, free_output_image])
 
     with gr.TabItem("Locate"):
         with gr.Row():
@@ -167,7 +169,8 @@ with gr.Blocks(title="Text Extraction Demo", theme=gr.themes.Soft()) as demo:
            loc_output_image = gr.Image(label="🖼️ Image Result (if any)", type="pil")
 
        # Wire Locate button
-
+        pets_detection = partial(process_ocr_task, task_type="๐ Locate Object by Reference", ref_text="pets")
+        loc_btn.click(fn=pets_detection, inputs=[loc_image, loc_model_size], outputs=[loc_output_text, loc_output_image])
 
     # Keep examples (they'll run process_ocr_task directly) - provide a compact examples widget pointing to the free tab inputs
     gr.Examples(
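The wiring added here uses functools.partial to pre-bind the arguments each tab keeps fixed (task_type and ref_text), so the click handler only receives the components listed in inputs. Below is a minimal, self-contained sketch of that pattern; the component names and the stub process_ocr_task are placeholders standing in for the Space's real code, not its actual implementation.

from functools import partial

import gradio as gr


def process_ocr_task(image, model_size, ref_text, task_type):
    # Stub standing in for the real DeepSeek-OCR pipeline: it just echoes
    # which arguments the handler received.
    return f"task={task_type!r}, ref={ref_text!r}, size={model_size!r}", image


with gr.Blocks() as demo:
    free_image = gr.Image(type="pil", label="Input image")
    free_model_size = gr.Dropdown(["Tiny", "Small", "Base"], value="Base", label="Model size")
    free_btn = gr.Button("Run Free OCR")
    free_output_text = gr.Textbox(label="Text result")
    free_output_image = gr.Image(type="pil", label="Image result")

    # partial() fixes task_type and ref_text up front, so Gradio only has to
    # supply the two visible inputs; they arrive positionally as
    # (image, model_size), matching the remaining parameters.
    free_ocr = partial(process_ocr_task, task_type="Free OCR", ref_text="")
    free_btn.click(fn=free_ocr,
                   inputs=[free_image, free_model_size],
                   outputs=[free_output_text, free_output_image])

demo.launch()

Binding the per-tab constants with partial keeps a single shared process_ocr_task for every tab and avoids wrapping each button in a lambda that captures the fixed values.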