Create app.py
app.py ADDED
@@ -0,0 +1,60 @@
+import json
+import numpy as np
+import tensorflow as tf
+from PIL import Image
+import ipywidgets as widgets
+from IPython.display import display
+
+# Load the locally stored Keras classifier
+model_path = 'final_teath_classifier.h5'
+model = tf.keras.models.load_model(model_path)
+
+def preprocess_image(image: Image.Image) -> np.ndarray:
+    # Ensure 3 channels and resize to the model's expected input size
+    image = image.convert("RGB").resize((256, 256))
+    # Convert the image to an array and scale pixel values to [0, 1]
+    img_array = np.array(image) / 255.0
+    # Add a batch dimension
+    img_array = np.expand_dims(img_array, axis=0)
+    return img_array
+
+def predict_image(image_path):
+    img = Image.open(image_path)
+    # Preprocess the image
+    img_array = preprocess_image(img)
+    # Run the model and convert its outputs to class probabilities
+    # (assumes the model returns raw logits)
+    outputs = model.predict(img_array)
+    predictions = tf.nn.softmax(outputs, axis=-1)
+    predicted_class = np.argmax(predictions)
+    if predicted_class == 0:
+        predict_label = "Clean"
+    else:
+        predict_label = "Carries"
+    return predict_label, predictions.numpy().flatten()
+
+# Create a file uploader widget
+uploader = widgets.FileUpload(accept="image/*", multiple=False)
+
+# Display the file uploader widget
+display(uploader)
+
+# Define a callback function to handle the uploaded image
+def on_upload(change):
+    # Get the raw bytes of the uploaded image (ipywidgets 7.x layout of uploader.value)
+    image_file = list(uploader.value.values())[0]["content"]
+    # Save the image to a temporary file
+    with open("temp_image.jpg", "wb") as f:
+        f.write(image_file)
+    # Get predictions for the uploaded image
+    predict_label, probabilities = predict_image("temp_image.jpg")
+    # Create a JSON object with the predicted class and per-class probabilities
+    predictions_json = {
+        "predicted_class": predict_label,
+        "evaluations": [f"{prob*100:.4f}%" for prob in probabilities]
+    }
+    # Print the JSON object
+    print(json.dumps(predictions_json, indent=4))
+
+# Set the callback function for when a file is uploaded
+uploader.observe(on_upload, names="value")
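A minimal usage sketch, assuming the definitions from app.py are loaded in the same notebook session and a local test image named sample_tooth.jpg (hypothetical filename) exists; it calls the prediction helper directly, without going through the upload widget:

# Hypothetical direct call, bypassing the FileUpload widget
label, probabilities = predict_image("sample_tooth.jpg")
print(label, [f"{p*100:.2f}%" for p in probabilities])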