Update app.py
app.py CHANGED

@@ -23,7 +23,9 @@ def query(image):
     binary_data = buffer.tobytes()
 
     response = requests.post(API_URL, headers=headers, data=binary_data)
-
+    result = {item['label']: item['score'] for item in response.json()}
+
+    return result
 
 def text_extraction(image):
     global text_content
@@ -110,7 +112,7 @@ def inference(image, text, audio, sentiment_option):
     image_sentiment_output = display_sentiment_results(image_sentiment_results, sentiment_option)
     text_sentiment_output = display_sentiment_results(text_sentiment_results, sentiment_option)
 
-    return extracted_image, extracted_facial_data, extracted_text, image_sentiment_output, text_sentiment_output, lang.upper(), result.text,
+    return extracted_image, extracted_facial_data, extracted_text, image_sentiment_output, text_sentiment_output, lang.upper(), result.text, audio_sentiment_output
 
 title = """<h1 align="center">Cross Model Machine Learning (Sentiment Analysis)</h1>"""
 image_path = "thmbnail.png"
@@ -166,19 +168,19 @@ with block:
     gr.HTML(description)
 
     with gr.Blocks():
-        with gr.
-            with gr.
+        with gr.Column():
+            with gr.Row():
                 image = gr.Image()
 
                 image_output = gr.Image()
                 text_output = gr.Textbox(label="Text Content")
                 text_sentiment = gr.Textbox(label="Text Sentiment")
-                facial_output = gr.
+                facial_output = gr.Label(label='Facial Data', container=True, scale=2)
 
         with gr.Column():
             gr.Textbox(label="Text Content")
 
-            output_text_sentiment = gr.Textbox("Text Sentiment")
+            output_text_sentiment = gr.Textbox(label="Text Sentiment")
 
         with gr.Column():
             audio = gr.Audio(label="Input Audio", show_label=False, type="filepath")
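For reference, the reworked query helper follows the usual Hugging Face Inference API pattern: serialize the image to raw bytes, POST them to the model endpoint, and fold the returned list of {"label", "score"} entries into a single dict. Below is a minimal standalone sketch of that pattern, assuming a cv2.imencode step and placeholder API_URL/headers (the real values live elsewhere in app.py); only the response-parsing and return lines mirror this commit.

import cv2
import requests

# Illustrative placeholders -- the actual endpoint and token are defined elsewhere in app.py.
API_URL = "https://api-inference.huggingface.co/models/<image-classification-model>"
headers = {"Authorization": "Bearer hf_xxx"}

def query(image):
    # Encode the input image (a NumPy array) to JPEG bytes for the request body.
    ok, buffer = cv2.imencode(".jpg", image)
    if not ok:
        raise ValueError("Could not encode image")
    binary_data = buffer.tobytes()

    response = requests.post(API_URL, headers=headers, data=binary_data)

    # The endpoint returns a list of {"label": ..., "score": ...} items;
    # collapse it into a label -> score mapping, as added in this commit.
    result = {item['label']: item['score'] for item in response.json()}
    return result

A label -> confidence dict is also the format gr.Label renders directly, which fits the switch of facial_output to gr.Label in this commit.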