Update app.py
app.py (CHANGED)
```diff
@@ -154,7 +154,7 @@ def process_and_generate(image_input, text_prompt, processing_size=512):
     if model.config.pad_token_id is None:
         model.config.pad_token_id = model.config.eos_token_id
 
-    generated_ids = model.generate(**inputs, max_new_tokens=2048, do_sample=True, top_p=0.8, temperature=0.7)
+    generated_ids = model.generate(**inputs, max_new_tokens=2048, do_sample=True, top_p=0.8, temperature=0.7)
 
     generated_ids_trimmed = [
         out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
```
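The generation call keeps sampling enabled (`do_sample=True`, `top_p=0.8`, `temperature=0.7`), and the list comprehension strips the prompt tokens from each returned sequence. For context, this is a minimal sketch of how the trimmed ids are usually decoded afterwards; the `processor` name and the `batch_decode` call are assumptions about the surrounding script, not lines from this commit:

```python
# Hypothetical continuation (not part of the diff): decode the prompt-stripped ids.
# Assumes `processor` is the AutoProcessor that built `inputs` earlier in app.py.
output_texts = processor.batch_decode(
    generated_ids_trimmed,
    skip_special_tokens=True,            # drop end-of-turn and padding markers
    clean_up_tokenization_spaces=False,  # keep the generated whitespace untouched
)
result = output_texts[0]  # one image per call, so take the first decoded string
```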
```diff
@@ -222,7 +222,13 @@ with gr.Blocks() as demo:
     submit_button = gr.Button("Generate Markdown")
 
     with gr.Row():
-
+        # --- CHANGED: Switched from gr.Markdown to gr.Textbox for copyable output ---
+        output_text = gr.Textbox(
+            label="Model Output",
+            lines=20,
+            interactive=False,
+            placeholder="Generated markdown will appear here..."
+        )
 
 
     def update_image(url, max_dimension, full_page_capture):
```
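Swapping `gr.Markdown` for a read-only `gr.Textbox` makes the model output selectable and copyable as raw text rather than rendered HTML. As a small variant sketch that is not part of this commit, recent Gradio releases also expose a `show_copy_button` flag on `gr.Textbox` that adds a one-click copy icon on top of the selectable text:

```python
# Variant sketch (not in the commit): the same component with Gradio's built-in copy button.
output_text = gr.Textbox(
    label="Model Output",
    lines=20,
    interactive=False,                 # read-only: filled by the event handler
    placeholder="Generated markdown will appear here...",
    show_copy_button=True,             # available in recent Gradio versions
)
```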
```diff
@@ -236,16 +242,17 @@ with gr.Blocks() as demo:
     def generate_markdown_with_loading(image, user_prompt, processing_size, use_template):
         # Return a dictionary of updates to show the loading state
         yield {
-
+            # --- CHANGED: Updated loading message for Textbox ---
+            output_text: "Processing, please wait... ⏳",
             submit_button: gr.update(interactive=False)
         }
-
+
         # Determine which prompt to use
         final_prompt = DETAILED_ANALYSIS_PROMPT if use_template else user_prompt
-
+
         # Process the data
         result = process_and_generate(image, final_prompt, processing_size)
-
+
         # Return a dictionary of updates with the final result
         yield {
             output_text: result,
```
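`generate_markdown_with_loading` is a generator: the first `yield` pushes the "Processing, please wait..." message into `output_text` and disables the button, and the final `yield` replaces it with the generated result. For that to work, both components have to be listed as outputs of the click event. The wiring is not part of this hunk; the sketch below is a hypothetical version of it, with the input component names (`image_input`, `prompt_box`, `size_slider`, `use_template_checkbox`) invented for illustration:

```python
# Hypothetical wiring (not shown in this diff). Gradio treats a generator handler
# as a stream: every yield updates the components named in `outputs`.
submit_button.click(
    fn=generate_markdown_with_loading,
    inputs=[image_input, prompt_box, size_slider, use_template_checkbox],
    outputs=[output_text, submit_button],  # both appear as keys in the yielded dicts
)
```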