Adding Image Generation
app.py CHANGED
@@ -1,5 +1,6 @@
 from transformers import pipeline
 import gradio as gr
+from diffusers import DiffusionPipeline

 # 1. text summarizer
 summarizer = pipeline("summarization", model = "facebook/bart-large-cnn")
@@ -15,10 +16,14 @@ def get_ner(text):

 # 3. Image Captioning
 caption_model = pipeline("image-to-text", model = "Salesforce/blip-image-captioning-base")
-# processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 def get_caption(img):
     output = caption_model(img)
     return output[0]["generated_text"]
+
+# 4. Image Generation
+img_model = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+def get_img(prompt):
+    return img_model(prompt).images[0]


 demo = gr.Blocks()
@@ -31,15 +36,18 @@ with demo:
         sum_btn.click(get_summary, sum_input, sum_output)
     with gr.Tab("Named Entity Recognition"):
         ner_input = [gr.Textbox(label="Text to find Entities", placeholder = "Enter text...", lines = 4)]
-        # ner_output = gr.Textbox()
         ner_output = [gr.HighlightedText(label="Text with entities")]
         ner_btn = gr.Button("Generate entities")
-        # allow_flagging = "never"
         ner_btn.click(get_ner, ner_input, ner_output)
     with gr.Tab("Image Captioning"):
         cap_input = [gr.Image(label="Upload Image", type="pil")]
         cap_btn = gr.Button("Generate Caption")
         cap_output = [gr.Textbox(label="Caption")]
         cap_btn.click(get_caption, cap_input, cap_output)
+    with gr.Tab("Image Generation"):
+        img_input = [gr.Textbox(label="Your Text")]
+        img_btn = gr.Button("Generate Image")
+        img_output = [gr.Image(label="Generated Image")]
+        img_btn.click(get_img, img_input, img_output)

 demo.launch()
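Note on the new image-generation step: as written, DiffusionPipeline.from_pretrained loads runwayml/stable-diffusion-v1-5 in full float32 precision on whatever device the Space provides, which is very slow without a GPU. Below is a minimal sketch of a common variant, assuming a CUDA GPU and that torch is importable; neither assumption is part of this commit.

# Sketch only: half-precision GPU loading for the same checkpoint.
# Assumes a CUDA device and torch are available (not shown in the diff above).
import torch
from diffusers import DiffusionPipeline

img_model = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,  # roughly halves weight memory; intended for GPU use
)
img_model = img_model.to("cuda")

def get_img(prompt):
    # The pipeline output exposes .images, a list of PIL images; the first one
    # can be returned straight into gr.Image, matching the Tab wiring above.
    return img_model(prompt).images[0]

Without a GPU, the unmodified float32 call in the diff still works, only with much longer generation times per image.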