Update streamlit_app.py

streamlit_app.py  CHANGED  (+19 -9)
@@ -3,10 +3,19 @@ import cv2
 import numpy as np
 import tensorflow as tf
 from PIL import Image
+import matplotlib.pyplot as plt
+import seaborn as sns
 import pandas as pd
 import mediapipe as mp
-
+import tempfile
+import os
+import json
+import time
+from typing import List, Dict, Optional
 import plotly.express as px
+import plotly.graph_objects as go
+from datetime import datetime
+
 
 st.set_page_config(
     page_title="ASL Recognition App",
@@ -15,6 +24,7 @@ st.set_page_config(
     initial_sidebar_state="expanded"
 )
 
+
 st.markdown("""
 <style>
     .main-header {
@@ -24,7 +34,7 @@ st.markdown("""
         margin-bottom: 2rem;
     }
     .prediction-box {
-        background-color: #262730;
+        background-color: #262730; /* dark gray-blue */
         padding: 1rem;
         border-radius: 10px;
         border-left: 5px solid #1f77b4;
@@ -84,7 +94,7 @@ class ASLStreamlitApp:
         image = np.expand_dims(image, axis=0)
         return image
 
-    def extract_hand_region(self, image: np.ndarray):
+    def extract_hand_region(self, image: np.ndarray) -> Optional[np.ndarray]:
         try:
             rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
             results = self.hands.process(rgb_image)
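The new `Optional[np.ndarray]` annotation documents that the method may produce no hand region at all. (Note that the error path visible in the next hunk returns `None, None`, i.e. a tuple, so the single-array annotation is a simplification.) A minimal stand-alone sketch of the idea, assuming MediaPipe's Hands solution and a BGR input frame; the padding and function shape are illustrative, not the Space's actual code:

import cv2
import numpy as np
import mediapipe as mp
from typing import Optional

def extract_hand_region(image: np.ndarray, pad: int = 20) -> Optional[np.ndarray]:
    # Detect one hand on a single BGR frame and crop a padded bounding box.
    with mp.solutions.hands.Hands(static_image_mode=True, max_num_hands=1) as hands:
        results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if not results.multi_hand_landmarks:
        return None  # the Optional[...] case: no hand detected
    h, w = image.shape[:2]
    xs = [lm.x for lm in results.multi_hand_landmarks[0].landmark]
    ys = [lm.y for lm in results.multi_hand_landmarks[0].landmark]
    x_min, x_max = max(int(min(xs) * w) - pad, 0), min(int(max(xs) * w) + pad, w)
    y_min, y_max = max(int(min(ys) * h) - pad, 0), min(int(max(ys) * h) + pad, h)
    return image[y_min:y_max, x_min:x_max]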
@@ -108,7 +118,7 @@ class ASLStreamlitApp:
             st.error(f"Error extracting hand: {str(e)}")
             return None, None
 
-    def predict_sign(self, image: np.ndarray, use_hand_detection: bool = True):
+    def predict_sign(self, image: np.ndarray, use_hand_detection: bool = True) -> Dict:
         if MODEL is None:
             st.error("Model not loaded!")
             return {}
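The `-> Dict` annotation lines up with the `return {}` fallbacks visible in this hunk. A minimal sketch of the dict such a method plausibly returns, assuming a Keras classifier in the module-level `MODEL` and a `class_names` list (both assumptions; the 'predicted_class' key mirrors the one read in the next hunk):

from typing import Dict, List
import numpy as np

def predict_sign(image: np.ndarray, class_names: List[str]) -> Dict:
    # Preprocess to a batch of one, run the model, package the result.
    if MODEL is None:            # assumed module-level Keras model, as above
        return {}
    batch = np.expand_dims(image, axis=0)
    probs = MODEL.predict(batch, verbose=0)[0]   # softmax over the classes
    idx = int(np.argmax(probs))
    return {
        'predicted_class': class_names[idx],     # key read by the display code
        'confidence': float(probs[idx]),
        'all_probabilities': probs.tolist(),
    }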
@@ -147,7 +157,7 @@ class ASLStreamlitApp:
             st.error(f"Prediction error: {str(e)}")
             return {}
 
-    def display_prediction_results(self, results):
+    def display_prediction_results(self, results: Dict):
         if not results:
             return
         predicted_class = results['predicted_class']
@@ -185,13 +195,13 @@ class ASLStreamlitApp:
                 'confidence': confidence
             })
 
-    def display_image_with_detection(self, results):
+    def display_image_with_detection(self, results: Dict):
         if not results or 'original_image' not in results:
             return
         col1, col2 = st.columns(2)
         with col1:
             st.subheader("Original Image")
-            original = results['original_image']
+            original = results['original_image']
             if results['hand_detected'] and results['bbox']:
                 x_min, y_min, x_max, y_max = results['bbox']
                 cv2.rectangle(original, (x_min, y_min), (x_max, y_max), (0, 255, 0), 3)
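One caveat on this hunk: cv2.rectangle draws into the array in place, so the image kept in `results['original_image']` is mutated by the call above. A common defensive pattern, shown here as an assumption about intent rather than what this commit does, is to draw on a copy and tell Streamlit the channel order:

annotated = results['original_image'].copy()   # leave the stored image untouched
if results['hand_detected'] and results['bbox']:
    x_min, y_min, x_max, y_max = results['bbox']
    cv2.rectangle(annotated, (x_min, y_min), (x_max, y_max), (0, 255, 0), 3)
st.image(annotated, channels="BGR")            # OpenCV arrays are BGR, not RGB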
@@ -221,7 +231,7 @@ class ASLStreamlitApp:
         if st.button("Save Word"):
             if st.session_state.current_word:
                 st.success(f"Saved: '{st.session_state.current_word}'")
-                #
+                # Save to file/db if needed
 
     def prediction_history_interface(self):
         st.subheader("π Prediction History")
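The new placeholder comment ("Save to file/db if needed") pairs naturally with the json, os, and datetime imports added in the first hunk. A minimal sketch of what that save step could look like; the file name and record layout are assumptions:

import json
import os
from datetime import datetime

def save_word(word: str, path: str = "saved_words.json") -> None:
    # Append the word with a timestamp to a small JSON log file.
    entries = []
    if os.path.exists(path):
        with open(path) as f:
            entries = json.load(f)
    entries.append({'word': word, 'saved_at': datetime.now().isoformat()})
    with open(path, 'w') as f:
        json.dump(entries, f, indent=2)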
@@ -324,4 +334,4 @@ def main():
     app.run()
 
 if __name__ == "__main__":
-    main()
+    main()